Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 85 ++++++++++++++++++++++++++-----------------
 1 file changed, 48 insertions(+), 37 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62e6314382f0..541f4828f5e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,7 @@
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
+#include <linux/futex.h>
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
@@ -99,6 +100,7 @@ DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
#include <linux/time.h>
@@ -206,11 +208,11 @@ extern void update_process_times(int user);
extern void scheduler_tick(void);
#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(struct pt_regs *regs);
+extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
-static inline void softlockup_tick(struct pt_regs *regs)
+static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
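With the pt_regs argument gone, the softlockup watchdog no longer needs register state from its caller. A minimal sketch of the new call site, assuming the 2.6.17-era kernel/timer.c (the surrounding function is context, not part of this patch):

	/* called once per tick from the local timer interrupt */
	void run_local_timers(void)
	{
		raise_softirq(TIMER_SOFTIRQ);
		softlockup_tick();	/* was softlockup_tick(regs) */
	}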
@@ -354,16 +356,8 @@ struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
- struct rcu_head rcu;
};
-extern void sighand_free_cb(struct rcu_head *rhp);
-
-static inline void sighand_free(struct sighand_struct *sp)
-{
- call_rcu(&sp->rcu, sighand_free_cb);
-}
-
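The rcu_head and the sighand_free() wrapper go away because freeing no longer has to wait for a grace period at this level; RCU safety is assumed to move into the allocator (a SLAB_DESTROY_BY_RCU cache), so teardown becomes a plain refcounted free. A sketch of the replacement __cleanup_sighand(), whose body is assumed here rather than shown in this hunk:

	/* sketch: immediate free; readers stay safe because the
	 * sighand cache is created with SLAB_DESTROY_BY_RCU */
	void __cleanup_sighand(struct sighand_struct *sighand)
	{
		if (atomic_dec_and_test(&sighand->count))
			kmem_cache_free(sighand_cachep, sighand);
	}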
/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
@@ -402,6 +396,7 @@ struct signal_struct {
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
+ struct task_struct *tsk;
ktime_t it_real_incr;
/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
@@ -489,6 +484,7 @@ struct signal_struct {
#define MAX_PRIO (MAX_RT_PRIO + 40)
#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
/*
* Some day this will be a full-fledged user tracking system..
@@ -689,6 +685,13 @@ static inline void prefetch_stack(struct task_struct *t) { }
struct audit_context; /* See audit.c */
struct mempolicy;
+enum sleep_type {
+ SLEEP_NORMAL,
+ SLEEP_NONINTERACTIVE,
+ SLEEP_INTERACTIVE,
+ SLEEP_INTERRUPTED,
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
@@ -706,11 +709,12 @@ struct task_struct {
prio_array_t *array;
unsigned short ioprio;
+ unsigned int btrace_seq;
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;
unsigned long long sched_time; /* sched_clock time spent running */
- int activated;
+ enum sleep_type sleep_type;
unsigned long policy;
cpumask_t cpus_allowed;
@@ -756,7 +760,8 @@ struct task_struct {
struct task_struct *group_leader; /* threadgroup leader */
/* PID/PID hash table linkage. */
- struct pid pids[PIDTYPE_MAX];
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
@@ -868,7 +873,13 @@ struct task_struct {
struct cpuset *cpuset;
nodemask_t mems_allowed;
int cpuset_mems_generation;
+ int cpuset_mem_spread_rotor;
+#endif
+ struct robust_list_head __user *robust_list;
+#ifdef CONFIG_COMPAT
+ struct compat_robust_list_head __user *compat_robust_list;
#endif
+
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
};
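The new robust_list pointer is written only by the owning task, via the set_robust_list() syscall, and walked by the kernel at exit time to unblock waiters on futexes the dying task held. A sketch of the registration side, assuming the 2.6.17-era kernel/futex.c:

	/* sketch: record where user space keeps its robust-futex list */
	asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
					    size_t len)
	{
		if (len != sizeof(*head))
			return -EINVAL;
		current->robust_list = head;
		return 0;
	}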
@@ -888,18 +899,19 @@ static inline pid_t process_group(struct task_struct *tsk)
*/
static inline int pid_alive(struct task_struct *p)
{
- return p->pids[PIDTYPE_PID].nr != 0;
+ return p->pids[PIDTYPE_PID].pid != NULL;
}
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct_cb(struct rcu_head *rhp);
+extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
{
if (atomic_dec_and_test(&t->usage))
- call_rcu(&t->rcu, __put_task_struct_cb);
+ __put_task_struct(t);
}
/*
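Dropping the last reference now frees the task synchronously through __put_task_struct() instead of queueing __put_task_struct_cb() with call_rcu(). The caller-side pattern is unchanged; a minimal sketch (example_pin_task() is a hypothetical helper, not in this patch):

	static void example_pin_task(struct task_struct *t)
	{
		get_task_struct(t);	/* atomic_inc(&t->usage) */
		/* ... t stays valid here even if it exits ... */
		put_task_struct(t);	/* last ref: __put_task_struct(t) runs now */
	}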
@@ -928,6 +940,9 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
+#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */
+#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
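The three new PF_ bits let cpuset and NUMA policy code decide per allocation with a cheap flag test instead of a cpuset lookup. A sketch of the intended test (the helper name is hypothetical):

	/* sketch: should this task spread page-cache pages over its cpuset? */
	static inline int example_do_page_spread(void)
	{
		return current->flags & PF_SPREAD_PAGE;
	}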
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1089,7 +1104,6 @@ extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
-extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
@@ -1146,10 +1160,8 @@ extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_files(struct task_struct *);
-extern void exit_signal(struct task_struct *);
-extern void __exit_signal(struct task_struct *);
-extern void exit_sighand(struct task_struct *);
-extern void __exit_sighand(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern NORET_TYPE void do_group_exit(int);
@@ -1173,19 +1185,7 @@ extern void wait_task_inactive(task_t * p);
#endif
#define remove_parent(p) list_del_init(&(p)->sibling)
-#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children)
-
-#define REMOVE_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_del_init(&(p)->tasks); \
- remove_parent(p); \
- } while (0)
-
-#define SET_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_add_tail(&(p)->tasks,&init_task.tasks); \
- add_parent(p, (p)->parent); \
- } while (0)
+#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
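add_parent() drops its second argument because the macro now reads (p)->parent, so every caller must assign the parent before linking; the REMOVE_LINKS/SET_LINKS pair is replaced by open-coded list operations at the call sites. The new convention, sketched:

	/* sketch: ->parent must be set before the child is linked */
	p->parent = new_parent;
	add_parent(p);	/* list_add_tail(&p->sibling, &p->parent->children) */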
@@ -1203,20 +1203,22 @@ extern void wait_task_inactive(task_t * p);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
-extern task_t * FASTCALL(next_thread(const task_t *p));
-
#define thread_group_leader(p) (p->pid == p->tgid)
+static inline task_t *next_thread(task_t *p)
+{
+ return list_entry(rcu_dereference(p->thread_group.next),
+ task_t, thread_group);
+}
+
static inline int thread_group_empty(task_t *p)
{
- return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
+ return list_empty(&p->thread_group);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern void unhash_process(struct task_struct *p);
-
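next_thread() becomes an inline walk of the new ->thread_group list and uses rcu_dereference(), so iterating a thread group is expected to happen under rcu_read_lock() (or tasklist_lock). A sketch of the usual loop under that assumption:

	task_t *t = g;

	rcu_read_lock();
	do {
		/* examine t ... */
	} while_each_thread(g, t);
	rcu_read_unlock();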
/*
* Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
@@ -1236,6 +1238,15 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags);
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
+
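lock_task_sighand() pins tsk->sighand against a concurrent exit and returns NULL when the task has already lost it, so callers must check the result before touching signal state. The intended pairing, sketched:

	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(tsk, &flags);
	if (sighand) {
		/* ->siglock held; ->sighand cannot go away here */
		unlock_task_sighand(tsk, &flags);
	}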
#ifndef __HAVE_THREAD_FUNCTIONS
#define task_thread_info(task) (task)->thread_info