author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-07 09:35:54 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-07 09:35:54 -0700
commit		7244d545c123ff33a3a5bff7fcb96a07778c32d1 (patch)
tree		c4d2893faa844e4cd6e2b47bbd2663dc28d9af83 /arch
parent		143a275984b37058d2d3ab1ec0e5be9026fda24d (diff)
parent		e7e8cc5ae63c39dbbbb5f14c5120bdf2d931fac9 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
[VIDEO] sunxvr500fb: Fix pseudo_palette array size
[VIDEO] sunxvr2500fb: Fix pseudo_palette array size
[VIDEO] ffb: The pseudo_palette is only 16 elements long
[VIDEO]: Fix section mismatch warning in promcon.
[ATA]: Back out bogus (SPARC64 && !PCI) Kconfig depends.
[SPARC64]: Fill in gaps in non-PCI dma_*() NOP implementation.
[SPARC64]: Fix {mc,smt}_capable().
[SPARC64]: Make core and sibling groups equal on UltraSPARC-IV.
[SPARC64]: Proper multi-core scheduling support.
[SPARC64]: Provide mmu statistics via sysfs.
[SPARC64]: Fix service channel hypervisor function names.
[SPARC64]: Export basic cpu properties via sysfs.
[SPARC64]: Move topology init code into new file, sysfs.c
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc64/Kconfig		|   9
-rw-r--r--	arch/sparc64/kernel/Makefile	|   4
-rw-r--r--	arch/sparc64/kernel/entry.S	|  62
-rw-r--r--	arch/sparc64/kernel/mdesc.c	|  53
-rw-r--r--	arch/sparc64/kernel/prom.c	|   5
-rw-r--r--	arch/sparc64/kernel/setup.c	|  19
-rw-r--r--	arch/sparc64/kernel/smp.c	|  21
-rw-r--r--	arch/sparc64/kernel/sysfs.c	| 297
8 files changed, 428 insertions, 42 deletions
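
Note: the new arch/sparc64/kernel/sysfs.c in the diff below registers one sysdev per possible cpu and exports basic properties (clock_tick, udelay_val, L1/L2 cache geometry) plus an mmu_stats group on sun4v. As a rough illustration of how these files can be consumed — assuming the usual /sys/devices/system/cpu/cpuN/ location for cpu sysdev attributes; the read_cpu_attr() helper is hypothetical and not part of the patch — a userspace reader might look like this:

/* Illustrative sketch only.  Attribute names come from cpu_core_attrs[]
 * in the new sysfs.c; the path prefix is an assumption about where the
 * cpu sysdev attributes appear. */
#include <stdio.h>
#include <string.h>

static int read_cpu_attr(int cpu, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/%s", cpu, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	static const char *attrs[] = {
		"clock_tick", "udelay_val",
		"l1_dcache_size", "l1_dcache_line_size",
		"l1_icache_size", "l1_icache_line_size",
		"l2_cache_size", "l2_cache_line_size",
	};
	char buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		if (read_cpu_attr(0, attrs[i], buf, sizeof(buf)) == 0)
			printf("cpu0 %-22s %s\n", attrs[i], buf);
	}
	return 0;
}
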
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index bd00f89eed1e..89a1b469b93d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -396,6 +396,15 @@ config SCHED_SMT
 	  when dealing with UltraSPARC cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config CMDLINE_BOOL
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index d8d19093d12f..f964bf28d21a 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
+#
 # Makefile for the linux kernel.
 #
 
@@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror
 extra-y		:= head.o init_task.o vmlinux.lds
 
 obj-y		:= process.o setup.o cpu.o idprom.o \
-		   traps.o auxio.o una_asm.o \
+		   traps.o auxio.o una_asm.o sysfs.o \
 		   irq.o ptrace.o time.o sys_sparc.o signal.o \
 		   unaligned.o central.o pci.o starfire.o semaphore.o \
 		   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index ed712e0b3372..7d1a11822a1e 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2514,9 +2514,9 @@ sun4v_ncs_request:
 	 nop
 	.size	sun4v_ncs_request, .-sun4v_ncs_request
 
-	.globl	sun4v_scv_send
-	.type	sun4v_scv_send,#function
-sun4v_scv_send:
+	.globl	sun4v_svc_send
+	.type	sun4v_svc_send,#function
+sun4v_svc_send:
 	save	%sp, -192, %sp
 	mov	%i0, %o0
 	mov	%i1, %o1
@@ -2526,11 +2526,11 @@ sun4v_scv_send:
 	stx	%o1, [%i3]
 	ret
 	 restore
-	.size	sun4v_scv_send, .-sun4v_scv_send
+	.size	sun4v_svc_send, .-sun4v_svc_send
 
-	.globl	sun4v_scv_recv
-	.type	sun4v_scv_recv,#function
-sun4v_scv_recv:
+	.globl	sun4v_svc_recv
+	.type	sun4v_svc_recv,#function
+sun4v_svc_recv:
 	save	%sp, -192, %sp
 	mov	%i0, %o0
 	mov	%i1, %o1
@@ -2540,33 +2540,55 @@ sun4v_scv_recv:
 	stx	%o1, [%i3]
 	ret
 	 restore
-	.size	sun4v_scv_recv, .-sun4v_scv_recv
+	.size	sun4v_svc_recv, .-sun4v_svc_recv
 
-	.globl	sun4v_scv_getstatus
-	.type	sun4v_scv_getstatus,#function
-sun4v_scv_getstatus:
+	.globl	sun4v_svc_getstatus
+	.type	sun4v_svc_getstatus,#function
+sun4v_svc_getstatus:
 	mov	HV_FAST_SVC_GETSTATUS, %o5
 	mov	%o1, %o4
 	ta	HV_FAST_TRAP
 	stx	%o1, [%o4]
 	retl
 	 nop
-	.size	sun4v_scv_getstatus, .-sun4v_scv_getstatus
+	.size	sun4v_svc_getstatus, .-sun4v_svc_getstatus
 
-	.globl	sun4v_scv_setstatus
-	.type	sun4v_scv_setstatus,#function
-sun4v_scv_setstatus:
+	.globl	sun4v_svc_setstatus
+	.type	sun4v_svc_setstatus,#function
+sun4v_svc_setstatus:
 	mov	HV_FAST_SVC_SETSTATUS, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
-	.size	sun4v_scv_setstatus, .-sun4v_scv_setstatus
+	.size	sun4v_svc_setstatus, .-sun4v_svc_setstatus
 
-	.globl	sun4v_scv_clrstatus
-	.type	sun4v_scv_clrstatus,#function
-sun4v_scv_clrstatus:
+	.globl	sun4v_svc_clrstatus
+	.type	sun4v_svc_clrstatus,#function
+sun4v_svc_clrstatus:
 	mov	HV_FAST_SVC_CLRSTATUS, %o5
 	ta	HV_FAST_TRAP
 	retl
 	 nop
-	.size	sun4v_scv_clrstatus, .-sun4v_scv_clrstatus
+	.size	sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
+
+	.globl	sun4v_mmustat_conf
+	.type	sun4v_mmustat_conf,#function
+sun4v_mmustat_conf:
+	mov	%o1, %o4
+	mov	HV_FAST_MMUSTAT_CONF, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	retl
+	 nop
+	.size	sun4v_mmustat_conf, .-sun4v_mmustat_conf
+
+	.globl	sun4v_mmustat_info
+	.type	sun4v_mmustat_info,#function
+sun4v_mmustat_info:
+	mov	%o0, %o4
+	mov	HV_FAST_MMUSTAT_INFO, %o5
+	ta	HV_FAST_TRAP
+	stx	%o1, [%o4]
+	retl
+	 nop
+	.size	sun4v_mmustat_info, .-sun4v_mmustat_info
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf9574..f0e16045fb16 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
 	}
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+	int i;
+
+	for (i = 0; i < mp->num_arcs; i++) {
+		struct mdesc_node *t = mp->arcs[i].arc;
+		const u64 *id;
+
+		if (strcmp(mp->arcs[i].name, "back"))
+			continue;
+
+		if (strcmp(t->name, "cpu"))
+			continue;
+
+		id = md_get_property(t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+	struct mdesc_node *mp;
+	int idx;
+
+	idx = 0;
+	md_for_each_node_by_name(mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = md_get_property(mp, "type", &len);
+		if (!find_in_proplist(type, "int", len) &&
+		    !find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(mp, idx);
+
+		idx++;
+	}
+}
+
+static void __init set_proc_ids(void)
+{
+	__set_proc_ids("exec_unit");
+	__set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
 {
 	u64 val;
@@ -574,9 +621,15 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 
 		c->core_id = 0;
+		c->proc_id = -1;
 	}
 
+#ifdef CONFIG_SMP
+	sparc64_multi_core = 1;
+#endif
+
 	set_core_ids();
+	set_proc_ids();
 
 	smp_fill_in_sib_core_maps();
 }
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba705f..6f4a5284b0ea 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1781,6 +1781,10 @@ static void __init of_fill_in_cpu_data(void)
 		}
 
 		cpu_data(cpuid).core_id = portid + 1;
+		cpu_data(cpuid).proc_id = portid;
+#ifdef CONFIG_SMP
+		sparc64_multi_core = 1;
+#endif
 	} else {
 		cpu_data(cpuid).dcache_size =
 			of_getintprop_default(dp, "dcache-size", 16 * 1024);
@@ -1799,6 +1803,7 @@ static void __init of_fill_in_cpu_data(void)
 			of_getintprop_default(dp, "ecache-line-size", 64);
 
 		cpu_data(cpuid).core_id = 0;
+		cpu_data(cpuid).proc_id = -1;
 	}
 
 #ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index de9b4c13f1c7..7490cc670a53 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -513,22 +513,3 @@ void sun_do_break(void)
 
 int serial_console = -1;
 int stop_a_enabled = 1;
-
-static int __init topology_init(void)
-{
-	int i, err;
-
-	err = -ENOMEM;
-
-	for_each_possible_cpu(i) {
-		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
-		if (p) {
-			register_cpu(p, i);
-			err = 0;
-		}
-	}
-
-	return err;
-}
-
-subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba3490a..4dcd7d0b60f2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -44,6 +44,8 @@
 
 extern void calibrate_delay(void);
 
+int sparc64_multi_core __read_mostly;
+
 /* Please don't make this stuff initdata!!!  --DaveM */
 unsigned char boot_cpu_id;
 
@@ -51,6 +53,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -1217,13 +1221,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
 		unsigned int j;
 
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_possible_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
+				cpu_set(j, cpu_core_map[i]);
+		}
+	}
+
+	for_each_possible_cpu(i) {
+		unsigned int j;
+
+		if (cpu_data(i).proc_id == -1) {
+			cpu_set(i, cpu_sibling_map[i]);
+			continue;
+		}
+
+		for_each_possible_cpu(j) {
+			if (cpu_data(i).proc_id ==
+			    cpu_data(j).proc_id)
 				cpu_set(j, cpu_sibling_map[i]);
 		}
 	}
diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c
new file mode 100644
index 000000000000..cdb1477af89f
--- /dev/null
+++ b/arch/sparc64/kernel/sysfs.c
@@ -0,0 +1,297 @@
+/* sysfs.c: Toplogy sysfs support code for sparc64.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+
+#include <asm/hypervisor.h>
+#include <asm/spitfire.h>
+
+static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
+
+#define SHOW_MMUSTAT_ULONG(NAME) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
+	return sprintf(buf, "%lu\n", p->NAME); \
+} \
+static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
+
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
+
+static struct attribute *mmu_stat_attrs[] = {
+	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
+	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
+	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
+	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
+	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
+	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
+	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
+	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
+	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
+	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
+	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
+	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
+	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
+	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
+	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
+	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
+	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
+	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
+	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
+	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
+	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
+	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
+	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
+	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
+	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
+	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
+	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
+	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
+	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
+	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
+	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
+	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
+	NULL,
+};
+
+static struct attribute_group mmu_stat_group = {
+	.attrs = mmu_stat_attrs,
+	.name = "mmu_stats",
+};
+
+/* XXX convert to rusty's on_one_cpu */
+static unsigned long run_on_cpu(unsigned long cpu,
+				unsigned long (*func)(unsigned long),
+				unsigned long arg)
+{
+	cpumask_t old_affinity = current->cpus_allowed;
+	unsigned long ret;
+
+	/* should return -EINVAL to userspace */
+	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+		return 0;
+
+	ret = func(arg);
+
+	set_cpus_allowed(current, old_affinity);
+
+	return ret;
+}
+
+static unsigned long read_mmustat_enable(unsigned long junk)
+{
+	unsigned long ra = 0;
+
+	sun4v_mmustat_info(&ra);
+
+	return ra != 0;
+}
+
+static unsigned long write_mmustat_enable(unsigned long val)
+{
+	unsigned long ra, orig_ra;
+
+	if (val)
+		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
+	else
+		ra = 0UL;
+
+	return sun4v_mmustat_conf(ra, &orig_ra);
+}
+
+static ssize_t show_mmustat_enable(struct sys_device *s, char *buf)
+{
+	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
+	return sprintf(buf, "%lx\n", val);
+}
+
+static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count)
+{
+	unsigned long val, err;
+	int ret = sscanf(buf, "%ld", &val);
+
+	if (ret != 1)
+		return -EINVAL;
+
+	err = run_on_cpu(s->id, write_mmustat_enable, val);
+	if (err)
+		return -EIO;
+
+	return count;
+}
+
+static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
+
+static int mmu_stats_supported;
+
+static int register_mmu_stats(struct sys_device *s)
+{
+	if (!mmu_stats_supported)
+		return 0;
+	sysdev_create_file(s, &attr_mmustat_enable);
+	return sysfs_create_group(&s->kobj, &mmu_stat_group);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_mmu_stats(struct sys_device *s)
+{
+	if (!mmu_stats_supported)
+		return;
+	sysfs_remove_group(&s->kobj, &mmu_stat_group);
+	sysdev_remove_file(s, &attr_mmustat_enable);
+}
+#endif
+
+#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+	cpuinfo_sparc *c = &cpu_data(dev->id); \
+	return sprintf(buf, "%lu\n", c->MEMBER); \
+}
+
+#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+	cpuinfo_sparc *c = &cpu_data(dev->id); \
+	return sprintf(buf, "%u\n", c->MEMBER); \
+}
+
+SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
+SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
+
+static struct sysdev_attribute cpu_core_attrs[] = {
+	_SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
+	_SYSDEV_ATTR(udelay_val, 0444, show_udelay_val, NULL),
+	_SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
+	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
+	_SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
+	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
+	_SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
+	_SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static void register_cpu_online(unsigned int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+		sysdev_create_file(s, &cpu_core_attrs[i]);
+
+	register_mmu_stats(s);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_cpu_online(unsigned int cpu)
+{
+	struct cpu *c = &per_cpu(cpu_devices, cpu);
+	struct sys_device *s = &c->sysdev;
+	int i;
+
+	unregister_mmu_stats(s);
+	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+		sysdev_remove_file(s, &cpu_core_attrs[i]);
+}
+#endif
+
+static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+				      unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned int)(long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		register_cpu_online(cpu);
+		break;
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		unregister_cpu_online(cpu);
+		break;
+#endif
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+	.notifier_call	= sysfs_cpu_notify,
+};
+
+static void __init check_mmu_stats(void)
+{
+	unsigned long dummy1, err;
+
+	if (tlb_type != hypervisor)
+		return;
+
+	err = sun4v_mmustat_info(&dummy1);
+	if (!err)
+		mmu_stats_supported = 1;
+}
+
+static int __init topology_init(void)
+{
+	int cpu;
+
+	check_mmu_stats();
+
+	register_cpu_notifier(&sysfs_cpu_nb);
+
+	for_each_possible_cpu(cpu) {
+		struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+		register_cpu(c, cpu);
+		if (cpu_online(cpu))
+			register_cpu_online(cpu);
+	}
+
+	return 0;
+}
+
+subsys_initcall(topology_init);
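
A note on the MMU statistics interface added above: writing 1 to mmustat_enable runs write_mmustat_enable() on the target cpu via run_on_cpu(), which passes the physical address of that cpu's per-cpu hv_mmu_statistics buffer to the hypervisor through sun4v_mmustat_conf(); the individual counters then become readable from the mmu_stats attribute group. A minimal userspace sketch, again assuming the attributes appear under /sys/devices/system/cpu/cpuN/ (the file names come from the patch; the helpers below are hypothetical):

/* Illustrative only: enable collection on cpu0, sample one counter,
 * then disable collection again. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &v) != 1)
		v = -1;
	fclose(f);
	return v;
}

int main(void)
{
	const char *base = "/sys/devices/system/cpu/cpu0";
	char path[256];

	/* Point the hypervisor at cpu0's per-cpu statistics buffer. */
	snprintf(path, sizeof(path), "%s/mmustat_enable", base);
	if (write_str(path, "1"))
		return 1;

	snprintf(path, sizeof(path),
		 "%s/mmu_stats/dmmu_tsb_hits_ctx0_8k_tte", base);
	printf("dmmu_tsb_hits_ctx0_8k_tte: %ld\n", read_long(path));

	/* Writing 0 disables collection. */
	snprintf(path, sizeof(path), "%s/mmustat_enable", base);
	write_str(path, "0");
	return 0;
}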