Diffstat (limited to 'include/linux/sched')
-rw-r--r--  include/linux/sched/mm.h     | 37
-rw-r--r--  include/linux/sched/signal.h | 49
-rw-r--r--  include/linux/sched/sysctl.h |  1
3 files changed, 83 insertions, 4 deletions
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 44d356f5e47c..aebb370a0006 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -248,6 +248,43 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
+#ifdef CONFIG_MEMCG
+/**
+ * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * @memcg: memcg to charge.
+ *
+ * This function marks the beginning of the remote memcg charging scope. All the
+ * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
+ * given memcg.
+ *
+ * NOTE: This function is not nesting safe.
+ */
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+ WARN_ON_ONCE(current->active_memcg);
+ current->active_memcg = memcg;
+}
+
+/**
+ * memalloc_unuse_memcg - Ends the remote memcg charging scope.
+ *
+ * This function marks the end of the remote memcg charging scope started by
+ * memalloc_use_memcg().
+ */
+static inline void memalloc_unuse_memcg(void)
+{
+ current->active_memcg = NULL;
+}
+#else
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void memalloc_unuse_memcg(void)
+{
+}
+#endif
+
#ifdef CONFIG_MEMBARRIER
enum {
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
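A minimal usage sketch for the new remote-charging scope; the helper name
kmalloc_for_memcg() is illustrative and not part of this patch. Every
__GFP_ACCOUNT allocation made between use and unuse is charged to the given
memcg instead of current's, and because the scope is documented as not
nesting safe, callers must not stack it:

	static void *kmalloc_for_memcg(struct mem_cgroup *memcg, size_t size)
	{
		void *p;

		memalloc_use_memcg(memcg);	/* begin remote charging scope */
		p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
		memalloc_unuse_memcg();		/* end scope */

		return p;
	}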
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 113d1ad1ced7..4e9b77fb702d 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -69,6 +69,11 @@ struct thread_group_cputimer {
bool checking_timer;
};
+struct multiprocess_signals {
+ sigset_t signal;
+ struct hlist_node node;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -90,6 +95,9 @@ struct signal_struct {
/* shared signal handling: */
struct sigpending shared_pending;
+ /* For collecting multiprocess signals during fork */
+ struct hlist_head multiprocess;
+
/* thread group exit support */
int group_exit_code;
/* overloaded:
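A hedged sketch of how the new multiprocess list is meant to be used during
fork; the exact call sites and locking live in kernel/fork.c and
kernel/signal.c, outside this diff. The idea is that the forking parent
publishes a multiprocess_signals node so group-directed signals arriving
mid-fork are recorded and can later be propagated to the child:

	struct multiprocess_signals delayed;

	sigemptyset(&delayed.signal);
	/* under the appropriate sighand lock */
	hlist_add_head(&delayed.node, &current->signal->multiprocess);
	/* ... create the child; group signals set bits in delayed.signal ... */
	hlist_del(&delayed.node);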
@@ -146,7 +154,8 @@ struct signal_struct {
#endif
- struct pid *leader_pid;
+ /* PID/PID hash table linkage. */
+ struct pid *pids[PIDTYPE_MAX];
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
@@ -329,7 +338,7 @@ extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline int restart_syscall(void)
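For callers, the send_sigqueue() change trades the task pointer plus integer
"group" flag for an explicit struct pid and pid type. A hedged before/after
sketch, with q and task as illustrative locals (task_tgid() is introduced
later in this same diff):

	int ret;

	/* before: a boolean group flag selected thread vs. thread group */
	ret = send_sigqueue(q, task, 1);

	/* after: the struct pid plus pid type make the target explicit */
	ret = send_sigqueue(q, task_tgid(task), PIDTYPE_TGID);	/* whole group */
	ret = send_sigqueue(q, task_pid(task), PIDTYPE_PID);	/* one task */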
@@ -371,6 +380,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
*/
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
+extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
@@ -383,6 +393,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
+void task_join_group_stop(struct task_struct *task);
+
#ifdef TIF_RESTORE_SIGMASK
/*
* Legacy restore_sigmask accessors. These are inefficient on
@@ -556,6 +568,37 @@ extern bool current_is_single_threaded(void);
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+{
+ struct pid *pid;
+ if (type == PIDTYPE_PID)
+ pid = task_pid(task);
+ else
+ pid = task->signal->pids[type];
+ return pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_TGID];
+}
+
+/*
+ * Without tasklist or RCU lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_PGID];
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_SID];
+}
+
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
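A small usage sketch for the new accessors, following the RCU warning above;
current_pgrp_nr() is an illustrative wrapper and not part of this patch,
while pid_vnr() is the existing helper that resolves a struct pid to a
namespace-relative numeric id:

	static pid_t current_pgrp_nr(void)
	{
		pid_t nr;

		rcu_read_lock();
		nr = pid_vnr(task_pgrp(current));	/* pid dereferenced under RCU */
		rcu_read_unlock();

		return nr;
	}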
@@ -574,7 +617,7 @@ static inline bool thread_group_leader(struct task_struct *p)
*/
static inline bool has_group_leader_pid(struct task_struct *p)
{
- return task_pid(p) == p->signal->leader_pid;
+ return task_pid(p) == task_tgid(p);
}
static inline
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..913488d828cb 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);