author    Chandra Seetharaman <sekharan@us.ibm.com>  2006-04-24 19:35:21 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>      2006-04-26 08:30:03 -0700
commit    83d722f7e198b034699b1500d98729beff930efd (patch)
tree      7d790a2fd62165373ec7bacde704837288e0bec3
parent    649bbaa484bcdce94f40a1b97a6a2ded0549e8a2 (diff)
[PATCH] Remove __devinit and __cpuinit from notifier_call definitions
A few of the notifier_chain_register() callers use __init in the definition of their notifier_call functions. This is incorrect: since the notifiers are never unregistered after initialization, the function definitions must remain available after the init sections are discarded. This patch fixes all such usages so that the notifier_call definitions are no longer placed in an init section.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
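[Editor's note: the following is a minimal, hypothetical sketch of the pattern this patch enforces, not code from the patch itself. The names example_cpu_callback, example_cpu_notifier and example_init are invented for illustration. The point is the asymmetry: the callback stays reachable through the notifier chain for the kernel's whole lifetime, so it must carry no init-section annotation, while the one-shot registration function may safely be __init.]

/*
 * Hypothetical sketch of a CPU hotplug notifier of the era.
 * __init/__cpuinit/__devinit text can be discarded after boot;
 * a callback left in such a section would leave the notifier
 * chain pointing into freed memory.
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Correct: no __init/__cpuinit/__devinit on the callback itself. */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* per-cpu setup for 'cpu' would go here */
		break;
	case CPU_DEAD:
		/* per-cpu teardown for 'cpu' would go here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

/*
 * The registration function, by contrast, may be __init: it runs
 * once at boot and is never called again, and the notifier is
 * never unregistered.
 */
static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}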
 arch/i386/kernel/cpu/intel_cacheinfo.c | 2 +-
 arch/ia64/kernel/palinfo.c             | 2 +-
 arch/ia64/kernel/salinfo.c             | 2 +-
 arch/ia64/kernel/topology.c            | 2 +-
 arch/powerpc/kernel/sysfs.c            | 2 +-
 arch/x86_64/kernel/mce.c               | 2 +-
 arch/x86_64/kernel/mce_amd.c           | 2 +-
 drivers/base/topology.c                | 2 +-
 drivers/cpufreq/cpufreq.c              | 2 +-
 kernel/hrtimer.c                       | 2 +-
 kernel/profile.c                       | 2 +-
 kernel/rcupdate.c                      | 2 +-
 kernel/softirq.c                       | 2 +-
 kernel/softlockup.c                    | 2 +-
 kernel/timer.c                         | 2 +-
 kernel/workqueue.c                     | 2 +-
 mm/page_alloc.c                        | 2 +-
 mm/slab.c                              | 2 +-
 mm/vmscan.c                            | 2 +-
 19 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9df87b03612c..c8547a6fa7e6 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -642,7 +642,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
return;
}
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 6386f63c413e..859fb37ff49b 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int __devinit palinfo_cpu_callback(struct notifier_block *nfb,
+static int palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 9d5a823479a3..663a186ad194 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
};
#ifdef CONFIG_HOTPLUG_CPU
-static int __devinit
+static int
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b47476d655f1..7da4739f536e 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -429,7 +429,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0235eb7ecba6..ed737cacf92d 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 6f0790e8b6d3..c69fc43cee7b 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
#endif
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index d3ad7d81266d..d13b241ad094 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -482,7 +482,7 @@ static void threshold_remove_device(unsigned int cpu)
#endif
/* get notified when a cpu comes on/off */
-static __cpuinit int threshold_cpu_callback(struct notifier_block *nfb,
+static int threshold_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/* cpu was unsigned int to begin with */
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 915810f6237e..8c52421cbc54 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
return 0;
}
-static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
+static int topology_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9759d05b1972..29b2fa5534ae 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,7 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);
-static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 4b63bb9797b2..b7f0388bd71c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -836,7 +836,7 @@ static void migrate_hrtimers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/kernel/profile.c b/kernel/profile.c
index 5a730fdb1a2c..68afe121e507 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 193e5a0df198..6d32ff26f948 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -520,7 +520,7 @@ static void __devinit rcu_online_cpu(int cpu)
tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
-static int __devinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a13f2b342e4b..336f92d64e2e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index dabebac50868..14c7faf02909 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int __devinit
+static int
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
diff --git a/kernel/timer.c b/kernel/timer.c
index d355d5a4d5ae..67eaf0f54096 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1314,7 +1314,7 @@ static void __devinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e9e464a90376..880fb415a8f6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -547,7 +547,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
}
/* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..af5c5237e11a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1036,7 +1036,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
#endif
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;