path: root/fs/eventpoll.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-23 16:59:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-23 16:59:10 -0700
commit     8e3ade251bc7c0a4f0777df4dd34343a03efadba (patch)
tree       6c0b78731e3d6609057951d07660efbd90992ad0 /fs/eventpoll.c
parent     e317234975cb7463b8ca21a93bb6862d9dcf113f (diff)
parent     e075f59152890ffd7e3d704afc997dd686c8a781 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge second batch of patches from Andrew Morton:
 - various misc things
 - core kernel changes to prctl, exit, exec, init, etc.
 - kernel/watchdog.c updates
 - get_maintainer
 - MAINTAINERS
 - the backlight driver queue
 - core bitops code cleanups
 - the led driver queue
 - some core prio_tree work
 - checkpatch updates
 - largeish crc32 update
 - a new poll() feature for the v4l guys
 - the rtc driver queue
 - fatfs
 - ptrace
 - signals
 - kmod/usermodehelper updates
 - coredump
 - procfs updates

* emailed from Andrew Morton <akpm@linux-foundation.org>: (141 commits)
  seq_file: add seq_set_overflow(), seq_overflow()
  proc-ns: use d_set_d_op() API to set dentry ops in proc_ns_instantiate().
  procfs: speed up /proc/pid/stat, statm
  procfs: add num_to_str() to speed up /proc/stat
  proc: speed up /proc/stat handling
  fs/proc/kcore.c: make get_sparsemem_vmemmap_info() static
  coredump: add VM_NODUMP, MADV_NODUMP, MADV_CLEAR_NODUMP
  coredump: remove VM_ALWAYSDUMP flag
  kmod: make __request_module() killable
  kmod: introduce call_modprobe() helper
  usermodehelper: ____call_usermodehelper() doesn't need do_exit()
  usermodehelper: kill umh_wait, renumber UMH_* constants
  usermodehelper: implement UMH_KILLABLE
  usermodehelper: introduce umh_complete(sub_info)
  usermodehelper: use UMH_WAIT_PROC consistently
  signal: zap_pid_ns_processes: s/SEND_SIG_NOINFO/SEND_SIG_FORCED/
  signal: oom_kill_task: use SEND_SIG_FORCED instead of force_sig()
  signal: cosmetic, s/from_ancestor_ns/force/ in prepare_signal() paths
  signal: give SEND_SIG_FORCED more power to beat SIGNAL_UNKILLABLE
  Hexagon: use set_current_blocked() and block_sigmask()
  ...
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--   fs/eventpoll.c   45
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4d9d3a45e356..629e9ed99d0f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -427,6 +427,31 @@ out_unlock:
return error;
}
+/*
+ * As described in commit 0ccf831cb lockdep: annotate epoll
+ * the use of wait queues used by epoll is done in a very controlled
+ * manner. Wake ups can nest inside each other, but are never done
+ * with the same locking. For example:
+ *
+ * dfd = socket(...);
+ * efd1 = epoll_create();
+ * efd2 = epoll_create();
+ * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
+ * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
+ *
+ * When a packet arrives to the device underneath "dfd", the net code will
+ * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
+ * callback wakeup entry on that queue, and the wake_up() performed by the
+ * "dfd" net code will end up in ep_poll_callback(). At this point epoll
+ * (efd1) notices that it may have some event ready, so it needs to wake up
+ * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
+ * that ends up in another wake_up(), after having checked about the
+ * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
+ * avoid stack blasting.
+ *
+ * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
+ * this special case of epoll.
+ */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
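
The dfd/efd1/efd2 scenario described in the comment above can be reproduced from
user space. The sketch below is only illustrative and is not part of the patch:
the choice of a UDP socket and the zero-timeout epoll_wait() are assumptions,
used simply to build the efd2 -> efd1 -> dfd chain whose nested wakeups
ep_poll_safewake() has to handle.

/* Illustrative user-space sketch: efd2 watches efd1, efd1 watches a
 * datagram socket.  A packet arriving on dfd wakes efd1, whose wakeup
 * in turn has to wake the waiters on efd2 (nested wake_up()).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int dfd  = socket(AF_INET, SOCK_DGRAM, 0);	/* "dfd" in the comment */
	int efd1 = epoll_create(1);
	int efd2 = epoll_create(1);
	struct epoll_event ev = { .events = EPOLLIN };
	struct epoll_event out;

	if (dfd < 0 || efd1 < 0 || efd2 < 0) {
		perror("setup");
		return EXIT_FAILURE;
	}

	ev.data.fd = dfd;
	if (epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, &ev) < 0)	/* efd1 watches dfd */
		perror("epoll_ctl(efd1)");

	ev.data.fd = efd1;
	if (epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev) < 0)	/* efd2 watches efd1 */
		perror("epoll_ctl(efd2)");

	/* From here on, a wake_up() on dfd's poll queue reaches
	 * ep_poll_callback() for efd1, which calls ep_poll_safewake() to
	 * wake efd2, nesting at most EP_MAX_POLLWAKE_NESTS levels deep. */
	printf("epoll_wait(efd2) returned %d\n",
	       epoll_wait(efd2, &out, 1, 0));	/* non-blocking check */
	return 0;
}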
@@ -699,9 +724,12 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct epitem *epi, *tmp;
+ poll_table pt;
+ init_poll_funcptr(&pt, NULL);
list_for_each_entry_safe(epi, tmp, head, rdllink) {
- if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
+ pt._key = epi->event.events;
+ if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
epi->event.events)
return POLLIN | POLLRDNORM;
else {
@@ -1049,13 +1077,11 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
*/
static int reverse_path_check(void)
{
- int length = 0;
int error = 0;
struct file *current_file;
/* let's call this for all tfiles */
list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
- length++;
path_count_init();
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
reverse_path_check_proc, current_file,
@@ -1097,6 +1123,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
+ epq.pt._key = event->events;
/*
* Attach the item to the poll hooks and get current event bits.
@@ -1191,6 +1218,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
{
int pwake = 0;
unsigned int revents;
+ poll_table pt;
+
+ init_poll_funcptr(&pt, NULL);
/*
* Set the new event interest mask before calling f_op->poll();
@@ -1198,13 +1228,14 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
* f_op->poll() call and the new event set registering.
*/
epi->event.events = event->events;
+ pt._key = event->events;
epi->event.data = event->data; /* protected by mtx */
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
- revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
+ revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);
/*
* If the item is "hot" and it is not registered inside the ready
@@ -1239,6 +1270,9 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
unsigned int revents;
struct epitem *epi;
struct epoll_event __user *uevent;
+ poll_table pt;
+
+ init_poll_funcptr(&pt, NULL);
/*
* We can loop without lock because we are passed a task private list.
@@ -1251,7 +1285,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
list_del_init(&epi->rdllink);
- revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
+ pt._key = epi->event.events;
+ revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
epi->event.events;
/*
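
Every call site touched above now fills in pt._key with the caller's event mask
before invoking f_op->poll(), so the underlying driver can see which events are
actually being asked for. The following is a minimal sketch of how a driver-side
poll() might consume that mask via poll_requested_events(), the accessor for the
_key field added in the same patch series; the my_dev structure and its wait
queues are hypothetical and serve only to illustrate the idea.

/* Hypothetical driver poll() method.  It consults the requested-events
 * mask that epoll now passes down through pt._key and only registers on
 * the wait queues the caller actually cares about.
 */
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {				/* hypothetical device state */
	wait_queue_head_t rd_wait;
	wait_queue_head_t wr_wait;
	bool readable;
	bool writable;
};

static unsigned int my_dev_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;
	unsigned long req = poll_requested_events(wait);	/* pt._key, or ~0UL */
	unsigned int mask = 0;

	/* Register only on the queues matching the requested events. */
	if (req & (POLLIN | POLLRDNORM))
		poll_wait(file, &dev->rd_wait, wait);
	if (req & (POLLOUT | POLLWRNORM))
		poll_wait(file, &dev->wr_wait, wait);

	if (dev->readable)
		mask |= POLLIN | POLLRDNORM;
	if (dev->writable)
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}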