author    Pavel Emelyanov <xemul@openvz.org>  2007-10-18 23:40:40 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-19 11:53:43 -0700
commit    ba25f9dcc4ea6e30839fcab5a5516f2176d5bfed (patch)
tree      3123c03b25dd5c0cd24b6ab4fc16731217838157 /kernel
parent    9a2e70572e94e21e7ec4186702d045415422bda0 (diff)
download  blackbird-op-linux-ba25f9dcc4ea6e30839fcab5a5516f2176d5bfed.tar.gz
          blackbird-op-linux-ba25f9dcc4ea6e30839fcab5a5516f2176d5bfed.zip
Use helpers to obtain task pid in printks
The task_struct->pid member is going to be deprecated, so start using the
helpers (task_pid_nr/task_pid_vnr/task_pid_nr_ns) in the kernel.

The first thing to start with is the pid, printed to dmesg - in this case we
may safely use task_pid_nr(). Besides, printks produce more (much more) than
half of all the explicit pid usage.

[akpm@linux-foundation.org: git-drm went and changed lots of stuff]
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: Dave Airlie <airlied@linux.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
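For context, every hunk in this diff applies the same substitution, roughly as in the minimal sketch below. It assumes kernel context (not a standalone program); the helper names come from the commit message, while the wrapper function and message text here are only illustrative.

```c
#include <linux/kernel.h>
#include <linux/sched.h>

/* Illustrative only: the old direct field access next to the
 * helper-based replacement this patch applies throughout kernel/. */
static void report_task(struct task_struct *tsk)
{
	/* Old style - reads the soon-to-be-deprecated field directly:
	 *     printk(KERN_INFO "%s[%d]\n", tsk->comm, tsk->pid);
	 */

	/* New style - task_pid_nr() returns the task's global
	 * (init pid-namespace) pid, which is the right number to
	 * print to dmesg. */
	printk(KERN_INFO "%s[%d]\n", tsk->comm, task_pid_nr(tsk));
}
```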
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c            3
-rw-r--r--  kernel/exit.c           2
-rw-r--r--  kernel/lockdep.c       22
-rw-r--r--  kernel/rtmutex-debug.c 15
-rw-r--r--  kernel/rtmutex.c        2
-rw-r--r--  kernel/sched.c          7
-rw-r--r--  kernel/signal.c         2
-rw-r--r--  kernel/softlockup.c     2
-rw-r--r--  kernel/workqueue.c      2
9 files changed, 31 insertions, 26 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a21f71af9d81..ebf6647a2bd4 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -98,7 +98,8 @@ static inline void check_for_tasks(int cpu)
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
(state = %ld, flags = %x) \n",
- p->comm, p->pid, cpu, p->state, p->flags);
+ p->comm, task_pid_nr(p), cpu,
+ p->state, p->flags);
}
write_unlock_irq(&tasklist_lock);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 6838d4d77e05..7dab2defec63 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -959,7 +959,7 @@ fastcall NORET_TYPE void do_exit(long code)
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
- current->comm, current->pid,
+ current->comm, task_pid_nr(current),
preempt_count());
acct_update_integrals(tsk);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b5392ff7e6a6..55fe0c7cd95f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -511,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr)
int i, depth = curr->lockdep_depth;
if (!depth) {
- printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
+ printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
return;
}
printk("%d lock%s held by %s/%d:\n",
- depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
+ depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
@@ -904,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
print_kernel_version();
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(check_source);
printk("\nbut task is already holding lock:\n");
print_lock(check_target);
@@ -1085,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr,
print_kernel_version();
printk( "------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
- curr->comm, curr->pid,
+ curr->comm, task_pid_nr(curr),
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
curr->hardirqs_enabled,
@@ -1237,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
print_kernel_version();
printk( "---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(next);
printk("\nbut task is already holding lock:\n");
print_lock(prev);
@@ -1641,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
usage_str[prev_bit], usage_str[new_bit]);
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
- curr->comm, curr->pid,
+ curr->comm, task_pid_nr(curr),
trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
trace_hardirqs_enabled(curr),
@@ -1694,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
print_kernel_version();
printk( "---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(this);
if (forwards)
printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
@@ -2487,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk( "[ BUG: bad unlock balance detected! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is trying to release lock (",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
@@ -2737,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk( "[ BUG: bad contention detected! ]\n");
printk( "---------------------------------\n");
printk("%s/%d is trying to contend lock (",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
@@ -3072,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
printk( "[ BUG: held lock freed! ]\n");
printk( "-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
- curr->comm, curr->pid, mem_from, mem_to-1);
+ curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
lockdep_print_held_locks(curr);
@@ -3125,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr)
printk( "[ BUG: lock held at task exit time! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 6b0703db152d..56d73cb8826d 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -87,7 +87,7 @@ static int rt_trace_on = 1;
static void printk_task(struct task_struct *p)
{
if (p)
- printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
else
printk("<none>");
}
@@ -152,22 +152,25 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk( "[ BUG: circular locking deadlock detected! ]\n");
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
- task->comm, task->pid, current->comm, current->pid);
+ task->comm, task_pid_nr(task),
+ current->comm, task_pid_nr(current));
printk("\n1) %s/%d is trying to acquire this lock:\n",
- current->comm, current->pid);
+ current->comm, task_pid_nr(current));
printk_lock(waiter->lock, 1);
- printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+ printk("\n2) %s/%d is blocked on this lock:\n",
+ task->comm, task_pid_nr(task));
printk_lock(waiter->deadlock_lock, 1);
debug_show_held_locks(current);
debug_show_held_locks(task);
- printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+ printk("\n%s/%d's [blocked] stackdump:\n\n",
+ task->comm, task_pid_nr(task));
show_stack(task, NULL);
printk("\n%s/%d's [current] stackdump:\n\n",
- current->comm, current->pid);
+ current->comm, task_pid_nr(current));
dump_stack();
debug_show_all_locks();
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 8cd9bd2cdb34..0deef71ff8d2 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -185,7 +185,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
prev_max = max_lock_depth;
printk(KERN_WARNING "Maximum lock depth %d reached "
"task: %s (%d)\n", max_lock_depth,
- top_task->comm, top_task->pid);
+ top_task->comm, task_pid_nr(top_task));
}
put_task_struct(task);
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d458504e3a6..a7e30462600f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3502,7 +3502,7 @@ EXPORT_SYMBOL(sub_preempt_count);
static noinline void __schedule_bug(struct task_struct *prev)
{
printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
- prev->comm, preempt_count(), prev->pid);
+ prev->comm, preempt_count(), task_pid_nr(prev));
debug_show_held_locks(prev);
if (irqs_disabled())
print_irqtrace_events(prev);
@@ -4865,7 +4865,8 @@ static void show_task(struct task_struct *p)
free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
- printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+ printk(KERN_CONT "%5lu %5d %6d\n", free,
+ task_pid_nr(p), task_pid_nr(p->parent));
if (state != TASK_RUNNING)
show_stack(p, NULL);
@@ -5172,7 +5173,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
if (p->mm && printk_ratelimit())
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
- p->pid, p->comm, dead_cpu);
+ task_pid_nr(p), p->comm, dead_cpu);
}
} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 08364e75bb58..12006308c7eb 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -730,7 +730,7 @@ int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
printk("%s/%d: potentially unexpected fatal signal %d.\n",
- current->comm, current->pid, signr);
+ current->comm, task_pid_nr(current), signr);
#ifdef __i386__
printk("code at %08lx: ", regs->eip);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index edeeef3a6a32..11df812263c8 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -113,7 +113,7 @@ void softlockup_tick(void)
spin_lock(&print_lock);
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
this_cpu, now - touch_timestamp,
- current->comm, current->pid);
+ current->comm, task_pid_nr(current));
if (regs)
show_regs(regs);
else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1916fea7108..52d5e7c9a8e6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,7 +282,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
"%s/0x%08x/%d\n",
current->comm, preempt_count(),
- current->pid);
+ task_pid_nr(current));
printk(KERN_ERR " last function: ");
print_symbol("%s\n", (unsigned long)f);
debug_show_held_locks(current);