Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kmod.c  44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index d38b2dab99a7..da98d0593de2 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -265,15 +265,9 @@ out:
do_exit(0);
}
-/*
- * Handles UMH_WAIT_PROC. Our parent (unbound workqueue) might not be able to
- * run enough instances to handle usermodehelper completions without blocking
- * some other pending requests. That's why we use a kernel thread dedicated for
- * that purpose.
- */
-static int call_usermodehelper_exec_sync(void *data)
+/* Handles UMH_WAIT_PROC. */
+static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
- struct subprocess_info *sub_info = data;
pid_t pid;
/* If SIGCLD is ignored sys_wait4 won't populate the status. */
@@ -287,9 +281,9 @@ static int call_usermodehelper_exec_sync(void *data)
* Normally it is bogus to call wait4() from in-kernel because
* wait4() wants to write the exit code to a userspace address.
* But call_usermodehelper_exec_sync() always runs as kernel
- * thread and put_user() to a kernel address works OK for kernel
- * threads, due to their having an mm_segment_t which spans the
- * entire address space.
+ * thread (workqueue) and put_user() to a kernel address works
+ * OK for kernel threads, due to their having an mm_segment_t
+ * which spans the entire address space.
*
* Thus the __user pointer cast is valid here.
*/
@@ -304,19 +298,21 @@ static int call_usermodehelper_exec_sync(void *data)
sub_info->retval = ret;
}
+ /* Restore default kernel sig handler */
+ kernel_sigaction(SIGCHLD, SIG_IGN);
+
umh_complete(sub_info);
- do_exit(0);
}
/*
- * This function doesn't strictly needs to be called asynchronously. But we
- * need to create the usermodehelper kernel threads from a task that is affine
+ * We need to create the usermodehelper kernel thread from a task that is affine
* to an optimized set of CPUs (or nohz housekeeping ones) such that they
* inherit a widest affinity irrespective of call_usermodehelper() callers with
* possibly reduced affinity (eg: per-cpu workqueues). We don't want
* usermodehelper targets to contend a busy CPU.
*
- * Unbound workqueues provide such wide affinity.
+ * Unbound workqueues provide such wide affinity and allow blocking on
+ * UMH_WAIT_PROC requests without blocking other pending ones (up to a limit).
*
* Besides, workqueues provide the privilege level that caller might not have
* to perform the usermodehelper request.
@@ -326,18 +322,18 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
{
struct subprocess_info *sub_info =
container_of(work, struct subprocess_info, work);
- pid_t pid;
- if (sub_info->wait & UMH_WAIT_PROC)
- pid = kernel_thread(call_usermodehelper_exec_sync, sub_info,
- CLONE_FS | CLONE_FILES | SIGCHLD);
- else
+ if (sub_info->wait & UMH_WAIT_PROC) {
+ call_usermodehelper_exec_sync(sub_info);
+ } else {
+ pid_t pid;
+
pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
SIGCHLD);
-
- if (pid < 0) {
- sub_info->retval = pid;
- umh_complete(sub_info);
+ if (pid < 0) {
+ sub_info->retval = pid;
+ umh_complete(sub_info);
+ }
}
}
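
For readers who want the net effect of the patch at a glance, the sketch below mirrors the control flow that call_usermodehelper_exec_work() is left with after the hunks above: a UMH_WAIT_PROC request is now handled synchronously inside the worker itself (install a real SIGCHLD disposition so the wait can collect a status, fork and wait, then restore the ignore disposition), while any other request only spawns the helper and returns. This is a user-space analogue for illustration only, not kernel code; the names run_helper_sync() and run_helper_async() are hypothetical, and the example assumes /bin/true exists.

/*
 * User-space analogue of the patched call_usermodehelper_exec_work() flow.
 * Hypothetical names; a sketch of the control flow, not the kernel code.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* Synchronous path: analogue of call_usermodehelper_exec_sync(). */
static int run_helper_sync(char *const argv[])
{
	pid_t pid;
	int status = -1;

	/* If SIGCHLD is ignored, wait*() cannot report the child's status. */
	signal(SIGCHLD, SIG_DFL);

	pid = fork();
	if (pid == 0) {
		execv(argv[0], argv);
		_exit(127);               /* exec failed */
	} else if (pid > 0) {
		waitpid(pid, &status, 0); /* block in the worker until the helper exits */
	}

	/* Restore the "ignore" disposition, as the patch does with kernel_sigaction(). */
	signal(SIGCHLD, SIG_IGN);
	return status;
}

/* Asynchronous path: spawn the helper and return without waiting. */
static void run_helper_async(char *const argv[])
{
	if (fork() == 0) {
		execv(argv[0], argv);
		_exit(127);
	}
}

int main(void)
{
	char *argv_sync[]  = { "/bin/true", NULL };
	char *argv_async[] = { "/bin/true", NULL };

	printf("sync helper status: %d\n", run_helper_sync(argv_sync));
	run_helper_async(argv_async);
	return 0;
}

The point of the patch is visible in the split: only the non-waiting case still needs a separate thread of execution, while the waiting case can simply block in the (unbound) worker, which tolerates a bounded number of such blocked requests.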