author     David S. Miller <davem@sunset.davemloft.net>    2007-05-25 15:49:59 -0700
committer  David S. Miller <davem@sunset.davemloft.net>    2007-05-29 02:49:41 -0700
commit     5cbc30737398b49f62ae8603129ce43ac7db1a41
tree       45d01a686865e6fd9c32b670f77af1e37db03008
parent     e01c0d6d8cf29c1c11725837b265598cab687952
[SPARC64]: Use machine description and OBP properly for cpu probing.
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/kernel/Makefile      |   4
-rw-r--r--  arch/sparc64/kernel/devices.c     | 196
-rw-r--r--  arch/sparc64/kernel/entry.S       |   9
-rw-r--r--  arch/sparc64/kernel/irq.c         |  83
-rw-r--r--  arch/sparc64/kernel/mdesc.c       | 619
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c   |   7
-rw-r--r--  arch/sparc64/kernel/prom.c        | 148
-rw-r--r--  arch/sparc64/kernel/setup.c       |  18
-rw-r--r--  arch/sparc64/kernel/smp.c         | 136
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S  |  30
-rw-r--r--  arch/sparc64/kernel/time.c        |   9
-rw-r--r--  arch/sparc64/kernel/traps.c       |  27
-rw-r--r--  arch/sparc64/mm/init.c            |  17
-rw-r--r--  include/asm-sparc64/cpudata.h     |  22
-rw-r--r--  include/asm-sparc64/hypervisor.h  |   5
-rw-r--r--  include/asm-sparc64/mdesc.h       |  39
-rw-r--r--  include/asm-sparc64/oplib.h       |   5
-rw-r--r--  include/asm-sparc64/percpu.h      |   4
-rw-r--r--  include/asm-sparc64/prom.h        |   1
-rw-r--r--  include/asm-sparc64/smp.h         |   4
-rw-r--r--  include/asm-sparc64/topology.h    |   3
21 files changed, 992 insertions(+), 394 deletions(-)
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 18e31a8db017..d8d19093d12f 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror extra-y := head.o init_task.o vmlinux.lds obj-y := process.o setup.o cpu.o idprom.o \ - traps.o devices.o auxio.o una_asm.o \ + traps.o auxio.o una_asm.o \ irq.o ptrace.o time.o sys_sparc.o signal.o \ unaligned.o central.o pci.o starfire.o semaphore.o \ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ - visemul.o prom.o of_device.o hvapi.o sstate.o + visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c deleted file mode 100644 index 0e03c8e218cd..000000000000 --- a/arch/sparc64/kernel/devices.c +++ /dev/null @@ -1,196 +0,0 @@ -/* devices.c: Initial scan of the prom device tree for important - * Sparc device nodes which we need to find. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - */ - -#include <linux/kernel.h> -#include <linux/threads.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/string.h> -#include <linux/spinlock.h> -#include <linux/errno.h> -#include <linux/bootmem.h> - -#include <asm/page.h> -#include <asm/oplib.h> -#include <asm/system.h> -#include <asm/smp.h> -#include <asm/spitfire.h> -#include <asm/timer.h> -#include <asm/cpudata.h> - -/* Used to synchronize accesses to NatSemi SUPER I/O chip configure - * operations in asm/ns87303.h - */ -DEFINE_SPINLOCK(ns87303_lock); - -extern void cpu_probe(void); -extern void central_probe(void); - -static const char *cpu_mid_prop(void) -{ - if (tlb_type == spitfire) - return "upa-portid"; - return "portid"; -} - -static int get_cpu_mid(struct device_node *dp) -{ - struct property *prop; - - if (tlb_type == hypervisor) { - struct linux_prom64_registers *reg; - int len; - - prop = of_find_property(dp, "cpuid", &len); - if (prop && len == 4) - return *(int *) prop->value; - - prop = of_find_property(dp, "reg", NULL); - reg = prop->value; - return (reg[0].phys_addr >> 32) & 0x0fffffffUL; - } else { - const char *prop_name = cpu_mid_prop(); - - prop = of_find_property(dp, prop_name, NULL); - if (prop) - return *(int *) prop->value; - return 0; - } -} - -static int check_cpu_node(struct device_node *dp, int *cur_inst, - int (*compare)(struct device_node *, int, void *), - void *compare_arg, - struct device_node **dev_node, int *mid) -{ - if (!compare(dp, *cur_inst, compare_arg)) { - if (dev_node) - *dev_node = dp; - if (mid) - *mid = get_cpu_mid(dp); - return 0; - } - - (*cur_inst)++; - - return -ENODEV; -} - -static int __cpu_find_by(int (*compare)(struct device_node *, int, void *), - void *compare_arg, - struct device_node **dev_node, int *mid) -{ - struct device_node *dp; - int cur_inst; - - cur_inst = 0; - for_each_node_by_type(dp, "cpu") { - int err = check_cpu_node(dp, &cur_inst, - compare, compare_arg, - dev_node, mid); - if (err == 0) - return 0; - } - - return -ENODEV; -} - -static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg) -{ - int desired_instance = (int) (long) _arg; - - if (instance == desired_instance) - return 0; - return -ENODEV; -} - -int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid) -{ - return __cpu_find_by(cpu_instance_compare, (void *)(long)instance, - dev_node, mid); -} - -static int cpu_mid_compare(struct 
device_node *dp, int instance, void *_arg) -{ - int desired_mid = (int) (long) _arg; - int this_mid; - - this_mid = get_cpu_mid(dp); - if (this_mid == desired_mid) - return 0; - return -ENODEV; -} - -int cpu_find_by_mid(int mid, struct device_node **dev_node) -{ - return __cpu_find_by(cpu_mid_compare, (void *)(long)mid, - dev_node, NULL); -} - -void __init device_scan(void) -{ - /* FIX ME FAST... -DaveM */ - ioport_resource.end = 0xffffffffffffffffUL; - - prom_printf("Booting Linux...\n"); - -#ifndef CONFIG_SMP - { - struct device_node *dp; - int err, def; - - err = cpu_find_by_instance(0, &dp, NULL); - if (err) { - prom_printf("No cpu nodes, cannot continue\n"); - prom_halt(); - } - cpu_data(0).clock_tick = - of_getintprop_default(dp, "clock-frequency", 0); - - def = ((tlb_type == hypervisor) ? - (8 * 1024) : - (16 * 1024)); - cpu_data(0).dcache_size = of_getintprop_default(dp, - "dcache-size", - def); - - def = 32; - cpu_data(0).dcache_line_size = - of_getintprop_default(dp, "dcache-line-size", def); - - def = 16 * 1024; - cpu_data(0).icache_size = of_getintprop_default(dp, - "icache-size", - def); - - def = 32; - cpu_data(0).icache_line_size = - of_getintprop_default(dp, "icache-line-size", def); - - def = ((tlb_type == hypervisor) ? - (3 * 1024 * 1024) : - (4 * 1024 * 1024)); - cpu_data(0).ecache_size = of_getintprop_default(dp, - "ecache-size", - def); - - def = 64; - cpu_data(0).ecache_line_size = - of_getintprop_default(dp, "ecache-line-size", def); - printk("CPU[0]: Caches " - "D[sz(%d):line_sz(%d)] " - "I[sz(%d):line_sz(%d)] " - "E[sz(%d):line_sz(%d)]\n", - cpu_data(0).dcache_size, cpu_data(0).dcache_line_size, - cpu_data(0).icache_size, cpu_data(0).icache_line_size, - cpu_data(0).ecache_size, cpu_data(0).ecache_line_size); - } -#endif - - central_probe(); - - cpu_probe(); -} diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index b5dbd5709155..f8cc3c0731c7 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1949,3 +1949,12 @@ sun4v_mach_set_soft_state: ta HV_FAST_TRAP retl nop + + .globl sun4v_mach_desc +sun4v_mach_desc: + mov %o2, %o4 + mov HV_FAST_MACH_DESC, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 3edc18e1b818..a36f8dd0c021 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -171,8 +171,6 @@ skip: return 0; } -extern unsigned long real_hard_smp_processor_id(void); - static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) { unsigned int tid; @@ -694,9 +692,20 @@ void init_irqwork_curcpu(void) trap_block[cpu].irq_worklist = 0; } -static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) +/* Please be very careful with register_one_mondo() and + * sun4v_register_mondo_queues(). + * + * On SMP this gets invoked from the CPU trampoline before + * the cpu has fully taken over the trap table from OBP, + * and it's kernel stack + %g6 thread register state is + * not fully cooked yet. + * + * Therefore you cannot make any OBP calls, not even prom_printf, + * from these two routines. 
+ */ +static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) { - unsigned long num_entries = 128; + unsigned long num_entries = (qmask + 1) / 64; unsigned long status; status = sun4v_cpu_qconf(type, paddr, num_entries); @@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu) { struct trap_per_cpu *tb = &trap_block[this_cpu]; - register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); - register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); - register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); - register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); + register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, + tb->cpu_mondo_qmask); + register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, + tb->dev_mondo_qmask); + register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, + tb->resum_qmask); + register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, + tb->nonresum_qmask); } -static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) +static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) { - void *page; + unsigned long size = PAGE_ALIGN(qmask + 1); + unsigned long order = get_order(size); + void *p = NULL; - if (use_bootmem) - page = alloc_bootmem_low_pages(PAGE_SIZE); - else - page = (void *) get_zeroed_page(GFP_ATOMIC); + if (use_bootmem) { + p = __alloc_bootmem_low(size, size, 0); + } else { + struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); + if (page) + p = page_address(page); + } - if (!page) { + if (!p) { prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); prom_halt(); } - *pa_ptr = __pa(page); + *pa_ptr = __pa(p); } -static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) +static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) { - void *page; + unsigned long size = PAGE_ALIGN(qmask + 1); + unsigned long order = get_order(size); + void *p = NULL; - if (use_bootmem) - page = alloc_bootmem_low_pages(PAGE_SIZE); - else - page = (void *) get_zeroed_page(GFP_ATOMIC); + if (use_bootmem) { + p = __alloc_bootmem_low(size, size, 0); + } else { + struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); + if (page) + p = page_address(page); + } - if (!page) { + if (!p) { prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); prom_halt(); } - *pa_ptr = __pa(page); + *pa_ptr = __pa(p); } static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) @@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int struct trap_per_cpu *tb = &trap_block[cpu]; if (alloc) { - alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); - alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); - alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); - alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); - alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); - alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); + alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem); + alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem); + alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem); + alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem); + alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem); + alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem); 
init_cpu_send_mondo_info(tb, use_bootmem); } diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c new file mode 100644 index 000000000000..9246c2cf9574 --- /dev/null +++ b/arch/sparc64/kernel/mdesc.c @@ -0,0 +1,619 @@ +/* mdesc.c: Sun4V machine description handling. + * + * Copyright (C) 2007 David S. Miller <davem@davemloft.net> + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bootmem.h> +#include <linux/log2.h> + +#include <asm/hypervisor.h> +#include <asm/mdesc.h> +#include <asm/prom.h> +#include <asm/oplib.h> +#include <asm/smp.h> + +/* Unlike the OBP device tree, the machine description is a full-on + * DAG. An arbitrary number of ARCs are possible from one + * node to other nodes and thus we can't use the OBP device_node + * data structure to represent these nodes inside of the kernel. + * + * Actually, it isn't even a DAG, because there are back pointers + * which create cycles in the graph. + * + * mdesc_hdr and mdesc_elem describe the layout of the data structure + * we get from the Hypervisor. + */ +struct mdesc_hdr { + u32 version; /* Transport version */ + u32 node_sz; /* node block size */ + u32 name_sz; /* name block size */ + u32 data_sz; /* data block size */ +}; + +struct mdesc_elem { + u8 tag; +#define MD_LIST_END 0x00 +#define MD_NODE 0x4e +#define MD_NODE_END 0x45 +#define MD_NOOP 0x20 +#define MD_PROP_ARC 0x61 +#define MD_PROP_VAL 0x76 +#define MD_PROP_STR 0x73 +#define MD_PROP_DATA 0x64 + u8 name_len; + u16 resv; + u32 name_offset; + union { + struct { + u32 data_len; + u32 data_offset; + } data; + u64 val; + } d; +}; + +static struct mdesc_hdr *main_mdesc; +static struct mdesc_node *allnodes; + +static struct mdesc_node *allnodes_tail; +static unsigned int unique_id; + +static struct mdesc_node **mdesc_hash; +static unsigned int mdesc_hash_size; + +static inline unsigned int node_hashfn(u64 node) +{ + return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16))) + & (mdesc_hash_size - 1); +} + +static inline void hash_node(struct mdesc_node *mp) +{ + struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)]; + + mp->hash_next = *head; + *head = mp; + + if (allnodes_tail) { + allnodes_tail->allnodes_next = mp; + allnodes_tail = mp; + } else { + allnodes = allnodes_tail = mp; + } +} + +static struct mdesc_node *find_node(u64 node) +{ + struct mdesc_node *mp = mdesc_hash[node_hashfn(node)]; + + while (mp) { + if (mp->node == node) + return mp; + + mp = mp->hash_next; + } + return NULL; +} + +struct property *md_find_property(const struct mdesc_node *mp, + const char *name, + int *lenp) +{ + struct property *pp; + + for (pp = mp->properties; pp != 0; pp = pp->next) { + if (strcasecmp(pp->name, name) == 0) { + if (lenp) + *lenp = pp->length; + break; + } + } + return pp; +} +EXPORT_SYMBOL(md_find_property); + +/* + * Find a property with a given name for a given node + * and return the value. + */ +const void *md_get_property(const struct mdesc_node *mp, const char *name, + int *lenp) +{ + struct property *pp = md_find_property(mp, name, lenp); + return pp ? pp->value : NULL; +} +EXPORT_SYMBOL(md_get_property); + +struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, + const char *name) +{ + struct mdesc_node *mp; + + mp = from ? 
from->allnodes_next : allnodes; + for (; mp != NULL; mp = mp->allnodes_next) { + if (strcmp(mp->name, name) == 0) + break; + } + return mp; +} +EXPORT_SYMBOL(md_find_node_by_name); + +static unsigned int mdesc_early_allocated; + +static void * __init mdesc_early_alloc(unsigned long size) +{ + void *ret; + + ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); + if (ret == NULL) { + prom_printf("MDESC: alloc of %lu bytes failed.\n", size); + prom_halt(); + } + + memset(ret, 0, size); + + mdesc_early_allocated += size; + + return ret; +} + +static unsigned int __init count_arcs(struct mdesc_elem *ep) +{ + unsigned int ret = 0; + + ep++; + while (ep->tag != MD_NODE_END) { + if (ep->tag == MD_PROP_ARC) + ret++; + ep++; + } + return ret; +} + +static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names) +{ + unsigned int num_arcs = count_arcs(ep); + struct mdesc_node *mp; + + mp = mdesc_early_alloc(sizeof(*mp) + + (num_arcs * sizeof(struct mdesc_arc))); + mp->name = names + ep->name_offset; + mp->node = node; + mp->unique_id = unique_id++; + mp->num_arcs = num_arcs; + + hash_node(mp); +} + +static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) +{ + return (struct mdesc_elem *) (mdesc + 1); +} + +static inline void *name_block(struct mdesc_hdr *mdesc) +{ + return ((void *) node_block(mdesc)) + mdesc->node_sz; +} + +static inline void *data_block(struct mdesc_hdr *mdesc) +{ + return ((void *) name_block(mdesc)) + mdesc->name_sz; +} + +/* In order to avoid recursion (the graph can be very deep) we use a + * two pass algorithm. First we allocate all the nodes and hash them. + * Then we iterate over each node, filling in the arcs and properties. + */ +static void __init build_all_nodes(struct mdesc_hdr *mdesc) +{ + struct mdesc_elem *start, *ep; + struct mdesc_node *mp; + const char *names; + void *data; + u64 last_node; + + start = ep = node_block(mdesc); + last_node = mdesc->node_sz / 16; + + names = name_block(mdesc); + + while (1) { + u64 node = ep - start; + + if (ep->tag == MD_LIST_END) + break; + + if (ep->tag != MD_NODE) { + prom_printf("MDESC: Inconsistent element list.\n"); + prom_halt(); + } + + mdesc_node_alloc(node, ep, names); + + if (ep->d.val >= last_node) { + printk("MDESC: Warning, early break out of node scan.\n"); + printk("MDESC: Next node [%lu] last_node [%lu].\n", + node, last_node); + break; + } + + ep = start + ep->d.val; + } + + data = data_block(mdesc); + for (mp = allnodes; mp; mp = mp->allnodes_next) { + struct mdesc_elem *ep = start + mp->node; + struct property **link = &mp->properties; + unsigned int this_arc = 0; + + ep++; + while (ep->tag != MD_NODE_END) { + switch (ep->tag) { + case MD_PROP_ARC: { + struct mdesc_node *target; + + if (this_arc >= mp->num_arcs) { + prom_printf("MDESC: ARC overrun [%u:%u]\n", + this_arc, mp->num_arcs); + prom_halt(); + } + target = find_node(ep->d.val); + if (!target) { + printk("MDESC: Warning, arc points to " + "missing node, ignoring.\n"); + break; + } + mp->arcs[this_arc].name = + (names + ep->name_offset); + mp->arcs[this_arc].arc = target; + this_arc++; + break; + } + + case MD_PROP_VAL: + case MD_PROP_STR: + case MD_PROP_DATA: { + struct property *p = mdesc_early_alloc(sizeof(*p)); + + p->unique_id = unique_id++; + p->name = (char *) names + ep->name_offset; + if (ep->tag == MD_PROP_VAL) { + p->value = &ep->d.val; + p->length = 8; + } else { + p->value = data + ep->d.data.data_offset; + p->length = ep->d.data.data_len; + } + *link = p; + link = &p->next; + break; + } + + case MD_NOOP: + 
break; + + default: + printk("MDESC: Warning, ignoring unknown tag type %02x\n", + ep->tag); + } + ep++; + } + } +} + +static unsigned int __init count_nodes(struct mdesc_hdr *mdesc) +{ + struct mdesc_elem *ep = node_block(mdesc); + struct mdesc_elem *end; + unsigned int cnt = 0; + + end = ((void *)ep) + mdesc->node_sz; + while (ep < end) { + if (ep->tag == MD_NODE) + cnt++; + ep++; + } + return cnt; +} + +static void __init report_platform_properties(void) +{ + struct mdesc_node *pn = md_find_node_by_name(NULL, "platform"); + const char *s; + const u64 *v; + + if (!pn) { + prom_printf("No platform node in machine-description.\n"); + prom_halt(); + } + + s = md_get_property(pn, "banner-name", NULL); + printk("PLATFORM: banner-name [%s]\n", s); + s = md_get_property(pn, "name", NULL); + printk("PLATFORM: name [%s]\n", s); + + v = md_get_property(pn, "hostid", NULL); + if (v) + printk("PLATFORM: hostid [%08lx]\n", *v); + v = md_get_property(pn, "serial#", NULL); + if (v) + printk("PLATFORM: serial# [%08lx]\n", *v); + v = md_get_property(pn, "stick-frequency", NULL); + printk("PLATFORM: stick-frequency [%08lx]\n", *v); + v = md_get_property(pn, "mac-address", NULL); + if (v) + printk("PLATFORM: mac-address [%lx]\n", *v); + v = md_get_property(pn, "watchdog-resolution", NULL); + if (v) + printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); + v = md_get_property(pn, "watchdog-max-timeout", NULL); + if (v) + printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); + v = md_get_property(pn, "max-cpus", NULL); + if (v) + printk("PLATFORM: max-cpus [%lu]\n", *v); +} + +static int inline find_in_proplist(const char *list, const char *match, int len) +{ + while (len > 0) { + int l; + + if (!strcmp(list, match)) + return 1; + l = strlen(list) + 1; + list += l; + len -= l; + } + return 0; +} + +static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp) +{ + const u64 *level = md_get_property(mp, "level", NULL); + const u64 *size = md_get_property(mp, "size", NULL); + const u64 *line_size = md_get_property(mp, "line-size", NULL); + const char *type; + int type_len; + + type = md_get_property(mp, "type", &type_len); + + switch (*level) { + case 1: + if (find_in_proplist(type, "instn", type_len)) { + c->icache_size = *size; + c->icache_line_size = *line_size; + } else if (find_in_proplist(type, "data", type_len)) { + c->dcache_size = *size; + c->dcache_line_size = *line_size; + } + break; + + case 2: + c->ecache_size = *size; + c->ecache_line_size = *line_size; + break; + + default: + break; + } + + if (*level == 1) { + unsigned int i; + + for (i = 0; i < mp->num_arcs; i++) { + struct mdesc_node *t = mp->arcs[i].arc; + + if (strcmp(mp->arcs[i].name, "fwd")) + continue; + + if (!strcmp(t->name, "cache")) + fill_in_one_cache(c, t); + } + } +} + +static void __init mark_core_ids(struct mdesc_node *mp, int core_id) +{ + unsigned int i; + + for (i = 0; i < mp->num_arcs; i++) { + struct mdesc_node *t = mp->arcs[i].arc; + const u64 *id; + + if (strcmp(mp->arcs[i].name, "back")) + continue; + + if (!strcmp(t->name, "cpu")) { + id = md_get_property(t, "id", NULL); + if (*id < NR_CPUS) + cpu_data(*id).core_id = core_id; + } else { + unsigned int j; + + for (j = 0; j < t->num_arcs; j++) { + struct mdesc_node *n = t->arcs[j].arc; + + if (strcmp(t->arcs[j].name, "back")) + continue; + + if (strcmp(n->name, "cpu")) + continue; + + id = md_get_property(n, "id", NULL); + if (*id < NR_CPUS) + cpu_data(*id).core_id = core_id; + } + } + } +} + +static void __init set_core_ids(void) +{ + struct 
mdesc_node *mp; + int idx; + + idx = 1; + md_for_each_node_by_name(mp, "cache") { + const u64 *level = md_get_property(mp, "level", NULL); + const char *type; + int len; + + if (*level != 1) + continue; + + type = md_get_property(mp, "type", &len); + if (!find_in_proplist(type, "instn", len)) + continue; + + mark_core_ids(mp, idx); + + idx++; + } +} + +static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) +{ + u64 val; + + if (!p) + goto use_default; + val = *p; + + if (!val || val >= 64) + goto use_default; + + *mask = ((1U << val) * 64U) - 1U; + return; + +use_default: + *mask = ((1U << def) * 64U) - 1U; +} + +static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb) +{ + const u64 *val; + + val = md_get_property(mp, "q-cpu-mondo-#bits", NULL); + get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); + + val = md_get_property(mp, "q-dev-mondo-#bits", NULL); + get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); + + val = md_get_property(mp, "q-resumable-#bits", NULL); + get_one_mondo_bits(val, &tb->resum_qmask, 6); + + val = md_get_property(mp, "q-nonresumable-#bits", NULL); + get_one_mondo_bits(val, &tb->nonresum_qmask, 2); +} + +static void __init mdesc_fill_in_cpu_data(void) +{ + struct mdesc_node *mp; + + ncpus_probed = 0; + md_for_each_node_by_name(mp, "cpu") { + const u64 *id = md_get_property(mp, "id", NULL); + const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL); + struct trap_per_cpu *tb; + cpuinfo_sparc *c; + unsigned int i; + int cpuid; + + ncpus_probed++; + + cpuid = *id; + +#ifdef CONFIG_SMP + if (cpuid >= NR_CPUS) + continue; +#else + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. 
+ */ + if (cpuid != real_hard_smp_processor_id()) + continue; + cpuid = 0; +#endif + + c = &cpu_data(cpuid); + c->clock_tick = *cfreq; + + tb = &trap_block[cpuid]; + get_mondo_data(mp, tb); + + for (i = 0; i < mp->num_arcs; i++) { + struct mdesc_node *t = mp->arcs[i].arc; + unsigned int j; + + if (strcmp(mp->arcs[i].name, "fwd")) + continue; + + if (!strcmp(t->name, "cache")) { + fill_in_one_cache(c, t); + continue; + } + + for (j = 0; j < t->num_arcs; j++) { + struct mdesc_node *n; + + n = t->arcs[j].arc; + if (strcmp(t->arcs[j].name, "fwd")) + continue; + + if (!strcmp(n->name, "cache")) + fill_in_one_cache(c, n); + } + } + +#ifdef CONFIG_SMP + cpu_set(cpuid, cpu_present_map); + cpu_set(cpuid, phys_cpu_present_map); +#endif + + c->core_id = 0; + } + + set_core_ids(); + + smp_fill_in_sib_core_maps(); +} + +void __init sun4v_mdesc_init(void) +{ + unsigned long len, real_len, status; + + (void) sun4v_mach_desc(0UL, 0UL, &len); + + printk("MDESC: Size is %lu bytes.\n", len); + + main_mdesc = mdesc_early_alloc(len); + + status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len); + if (status != HV_EOK || real_len > len) { + prom_printf("sun4v_mach_desc fails, err(%lu), " + "len(%lu), real_len(%lu)\n", + status, len, real_len); + prom_halt(); + } + + len = count_nodes(main_mdesc); + printk("MDESC: %lu nodes.\n", len); + + len = roundup_pow_of_two(len); + + mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *)); + mdesc_hash_size = len; + + printk("MDESC: Hash size %lu entries.\n", len); + + build_all_nodes(main_mdesc); + + printk("MDESC: Built graph with %u bytes of memory.\n", + mdesc_early_allocated); + + report_platform_properties(); + mdesc_fill_in_cpu_data(); +} diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index e2377796de89..323d6c278518 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -762,9 +762,10 @@ void sabre_init(struct device_node *dp, char *model_name) /* Of course, Sun has to encode things a thousand * different ways, inconsistently. */ - cpu_find_by_instance(0, &dp, NULL); - if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) - hummingbird_p = 1; + for_each_node_by_type(dp, "cpu") { + if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) + hummingbird_p = 1; + } } } diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index 02830e4671f5..dad4b3ba705f 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c @@ -28,6 +28,7 @@ #include <asm/irq.h> #include <asm/asi.h> #include <asm/upa.h> +#include <asm/smp.h> static struct device_node *allnodes; @@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl return ret; } +static const char *get_mid_prop(void) +{ + return (tlb_type == spitfire ? 
"upa-portid" : "portid"); +} + +struct device_node *of_find_node_by_cpuid(int cpuid) +{ + struct device_node *dp; + const char *mid_prop = get_mid_prop(); + + for_each_node_by_type(dp, "cpu") { + int id = of_getintprop_default(dp, mid_prop, -1); + const char *this_mid_prop = mid_prop; + + if (id < 0) { + this_mid_prop = "cpuid"; + id = of_getintprop_default(dp, this_mid_prop, -1); + } + + if (id < 0) { + prom_printf("OF: Serious problem, cpu lacks " + "%s property", this_mid_prop); + prom_halt(); + } + if (cpuid == id) + return dp; + } + return NULL; +} + +static void __init of_fill_in_cpu_data(void) +{ + struct device_node *dp; + const char *mid_prop = get_mid_prop(); + + ncpus_probed = 0; + for_each_node_by_type(dp, "cpu") { + int cpuid = of_getintprop_default(dp, mid_prop, -1); + const char *this_mid_prop = mid_prop; + struct device_node *portid_parent; + int portid = -1; + + portid_parent = NULL; + if (cpuid < 0) { + this_mid_prop = "cpuid"; + cpuid = of_getintprop_default(dp, this_mid_prop, -1); + if (cpuid >= 0) { + int limit = 2; + + portid_parent = dp; + while (limit--) { + portid_parent = portid_parent->parent; + if (!portid_parent) + break; + portid = of_getintprop_default(portid_parent, + "portid", -1); + if (portid >= 0) + break; + } + } + } + + if (cpuid < 0) { + prom_printf("OF: Serious problem, cpu lacks " + "%s property", this_mid_prop); + prom_halt(); + } + + ncpus_probed++; + +#ifdef CONFIG_SMP + if (cpuid >= NR_CPUS) + continue; +#else + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. + */ + if (cpuid != real_hard_smp_processor_id()) + continue; + cpuid = 0; +#endif + + cpu_data(cpuid).clock_tick = + of_getintprop_default(dp, "clock-frequency", 0); + + if (portid_parent) { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "l1-dcache-size", + 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "l1-dcache-line-size", + 32); + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "l1-icache-size", + 8 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "l1-icache-line-size", + 32); + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "l2-cache-size", 0); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "l2-cache-line-size", 0); + if (!cpu_data(cpuid).ecache_size || + !cpu_data(cpuid).ecache_line_size) { + cpu_data(cpuid).ecache_size = + of_getintprop_default(portid_parent, + "l2-cache-size", + (4 * 1024 * 1024)); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(portid_parent, + "l2-cache-line-size", 64); + } + + cpu_data(cpuid).core_id = portid + 1; + } else { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "dcache-size", 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "dcache-line-size", 32); + + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "icache-size", 16 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "icache-line-size", 32); + + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "ecache-size", + (4 * 1024 * 1024)); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "ecache-line-size", 64); + + cpu_data(cpuid).core_id = 0; + } + +#ifdef CONFIG_SMP + cpu_set(cpuid, cpu_present_map); + cpu_set(cpuid, phys_cpu_present_map); +#endif + } + + smp_fill_in_sib_core_maps(); +} + void __init prom_build_devicetree(void) { struct device_node **nextp; @@ -1679,4 +1824,7 @@ void __init 
prom_build_devicetree(void) &nextp); printk("PROM: Built device tree with %u bytes of memory.\n", prom_early_allocated); + + if (tlb_type != hypervisor) + of_fill_in_cpu_data(); } diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index dea9c3c9ec5f..de9b4c13f1c7 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -46,11 +46,17 @@ #include <asm/sections.h> #include <asm/setup.h> #include <asm/mmu.h> +#include <asm/ns87303.h> #ifdef CONFIG_IP_PNP #include <net/ipconfig.h> #endif +/* Used to synchronize accesses to NatSemi SUPER I/O chip configure + * operations in asm/ns87303.h + */ +DEFINE_SPINLOCK(ns87303_lock); + struct screen_info screen_info = { 0, 0, /* orig-x, orig-y */ 0, /* unused */ @@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p) init_cur_cpu_trap(current_thread_info()); paging_init(); - - smp_setup_cpu_possible_map(); } static int __init set_preferred_console(void) @@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *); unsigned int dcache_parity_tl1_occurred; unsigned int icache_parity_tl1_occurred; -static int ncpus_probed; +int ncpus_probed; static int show_cpuinfo(struct seq_file *m, void *__unused) { @@ -516,14 +520,6 @@ static int __init topology_init(void) err = -ENOMEM; - /* Count the number of physically present processors in - * the machine, even on uniprocessor, so that /proc/cpuinfo - * output is consistent with 2.4.x - */ - ncpus_probed = 0; - while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) - ncpus_probed++; - for_each_possible_cpu(i) { struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); if (p) { diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 24fdf1d0adc5..f7fa873c800d 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -40,6 +40,7 @@ #include <asm/tlb.h> #include <asm/sections.h> #include <asm/prom.h> +#include <asm/mdesc.h> extern void calibrate_delay(void); @@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m) i, cpu_data(i).clock_tick); } -void __init smp_store_cpu_info(int id) -{ - struct device_node *dp; - int def; - - cpu_data(id).udelay_val = loops_per_jiffy; - - cpu_find_by_mid(id, &dp); - cpu_data(id).clock_tick = - of_getintprop_default(dp, "clock-frequency", 0); - - def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); - cpu_data(id).dcache_size = - of_getintprop_default(dp, "dcache-size", def); - - def = 32; - cpu_data(id).dcache_line_size = - of_getintprop_default(dp, "dcache-line-size", def); - - def = 16 * 1024; - cpu_data(id).icache_size = - of_getintprop_default(dp, "icache-size", def); - - def = 32; - cpu_data(id).icache_line_size = - of_getintprop_default(dp, "icache-line-size", def); - - def = ((tlb_type == hypervisor) ? 
- (3 * 1024 * 1024) : - (4 * 1024 * 1024)); - cpu_data(id).ecache_size = - of_getintprop_default(dp, "ecache-size", def); - - def = 64; - cpu_data(id).ecache_line_size = - of_getintprop_default(dp, "ecache-line-size", def); - - printk("CPU[%d]: Caches " - "D[sz(%d):line_sz(%d)] " - "I[sz(%d):line_sz(%d)] " - "E[sz(%d):line_sz(%d)]\n", - id, - cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, - cpu_data(id).icache_size, cpu_data(id).icache_line_size, - cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); -} - extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; @@ -145,7 +99,7 @@ void __init smp_callin(void) local_irq_enable(); calibrate_delay(); - smp_store_cpu_info(cpuid); + cpu_data(cpuid).udelay_val = loops_per_jiffy; callin_flag = 1; __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); @@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) prom_startcpu_cpuid(cpu, entry, cookie); } else { - struct device_node *dp; + struct device_node *dp = of_find_node_by_cpuid(cpu); - cpu_find_by_mid(cpu, &dp); prom_startcpu(dp->node, entry, cookie); } @@ -1191,23 +1144,14 @@ int setup_profiling_timer(unsigned int multiplier) static void __init smp_tune_scheduling(void) { - struct device_node *dp; - int instance; - unsigned int def, smallest = ~0U; - - def = ((tlb_type == hypervisor) ? - (3 * 1024 * 1024) : - (4 * 1024 * 1024)); + unsigned int smallest = ~0U; + int i; - instance = 0; - while (!cpu_find_by_instance(instance, &dp, NULL)) { - unsigned int val; + for (i = 0; i < NR_CPUS; i++) { + unsigned int val = cpu_data(i).ecache_size; - val = of_getintprop_default(dp, "ecache-size", def); - if (val < smallest) + if (val && val < smallest) smallest = val; - - instance++; } /* Any value less than 256K is nonsense. */ @@ -1230,58 +1174,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int i; if (num_possible_cpus() > max_cpus) { - int instance, mid; - - instance = 0; - while (!cpu_find_by_instance(instance, NULL, &mid)) { - if (mid != boot_cpu_id) { - cpu_clear(mid, phys_cpu_present_map); - cpu_clear(mid, cpu_present_map); + for_each_possible_cpu(i) { + if (i != boot_cpu_id) { + cpu_clear(i, phys_cpu_present_map); + cpu_clear(i, cpu_present_map); if (num_possible_cpus() <= max_cpus) break; } - instance++; - } - } - - for_each_possible_cpu(i) { - if (tlb_type == hypervisor) { - int j; - - /* XXX get this mapping from machine description */ - for_each_possible_cpu(j) { - if ((j >> 2) == (i >> 2)) - cpu_set(j, cpu_sibling_map[i]); - } - } else { - cpu_set(i, cpu_sibling_map[i]); } } - smp_store_cpu_info(boot_cpu_id); + cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy; smp_tune_scheduling(); } -/* Set this up early so that things like the scheduler can init - * properly. We use the same cpu mask for both the present and - * possible cpu map. 
- */ -void __init smp_setup_cpu_possible_map(void) +void __devinit smp_prepare_boot_cpu(void) { - int instance, mid; - - instance = 0; - while (!cpu_find_by_instance(instance, NULL, &mid)) { - if (mid < NR_CPUS) { - cpu_set(mid, phys_cpu_present_map); - cpu_set(mid, cpu_present_map); - } - instance++; - } } -void __devinit smp_prepare_boot_cpu(void) +void __devinit smp_fill_in_sib_core_maps(void) { + unsigned int i; + + for_each_possible_cpu(i) { + unsigned int j; + + if (cpu_data(i).core_id == 0) { + cpu_set(i, cpu_sibling_map[i]); + continue; + } + + for_each_possible_cpu(j) { + if (cpu_data(i).core_id == + cpu_data(j).core_id) + cpu_set(j, cpu_sibling_map[i]); + } + } } int __cpuinit __cpu_up(unsigned int cpu) @@ -1337,7 +1265,7 @@ unsigned long __per_cpu_shift __read_mostly; EXPORT_SYMBOL(__per_cpu_base); EXPORT_SYMBOL(__per_cpu_shift); -void __init setup_per_cpu_areas(void) +void __init real_setup_per_cpu_areas(void) { unsigned long goal, size, i; char *ptr; diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S index 405855dd886b..574bc248bca6 100644 --- a/arch/sparc64/kernel/sun4v_ivec.S +++ b/arch/sparc64/kernel/sun4v_ivec.S @@ -22,12 +22,12 @@ sun4v_cpu_mondo: be,pn %xcc, sun4v_cpu_mondo_queue_empty nop - /* Get &trap_block[smp_processor_id()] into %g3. */ - ldxa [%g0] ASI_SCRATCHPAD, %g3 - sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 + /* Get &trap_block[smp_processor_id()] into %g4. */ + ldxa [%g0] ASI_SCRATCHPAD, %g4 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 /* Get CPU mondo queue base phys address into %g7. */ - ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 /* Now get the cross-call arguments and handler PC, same * layout as sun4u: @@ -47,8 +47,7 @@ sun4v_cpu_mondo: add %g2, 0x40 - 0x8 - 0x8, %g2 /* Update queue head pointer. */ - sethi %hi(8192 - 1), %g4 - or %g4, %lo(8192 - 1), %g4 + lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4 and %g2, %g4, %g2 mov INTRQ_CPU_MONDO_HEAD, %g4 @@ -71,12 +70,12 @@ sun4v_dev_mondo: be,pn %xcc, sun4v_dev_mondo_queue_empty nop - /* Get &trap_block[smp_processor_id()] into %g3. */ - ldxa [%g0] ASI_SCRATCHPAD, %g3 - sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 + /* Get &trap_block[smp_processor_id()] into %g4. */ + ldxa [%g0] ASI_SCRATCHPAD, %g4 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 /* Get DEV mondo queue base phys address into %g5. */ - ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 + ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 /* Load IVEC into %g3. */ ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 @@ -90,8 +89,7 @@ sun4v_dev_mondo: */ /* Update queue head pointer, this frees up some registers. */ - sethi %hi(8192 - 1), %g4 - or %g4, %lo(8192 - 1), %g4 + lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4 and %g2, %g4, %g2 mov INTRQ_DEVICE_MONDO_HEAD, %g4 @@ -143,6 +141,8 @@ sun4v_res_mondo: brnz,pn %g1, sun4v_res_mondo_queue_full nop + lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4 + /* Remember this entry's offset in %g1. */ mov %g2, %g1 @@ -173,8 +173,6 @@ sun4v_res_mondo: add %g2, 0x08, %g2 /* Update queue head pointer. */ - sethi %hi(8192 - 1), %g4 - or %g4, %lo(8192 - 1), %g4 and %g2, %g4, %g2 mov INTRQ_RESUM_MONDO_HEAD, %g4 @@ -254,6 +252,8 @@ sun4v_nonres_mondo: brnz,pn %g1, sun4v_nonres_mondo_queue_full nop + lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4 + /* Remember this entry's offset in %g1. */ mov %g2, %g1 @@ -284,8 +284,6 @@ sun4v_nonres_mondo: add %g2, 0x08, %g2 /* Update queue head pointer. 
*/ - sethi %hi(8192 - 1), %g4 - or %g4, %lo(8192 - 1), %g4 and %g2, %g4, %g2 mov INTRQ_NONRESUM_MONDO_HEAD, %g4 diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 2d63d7689962..0f62ea82953c 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -862,7 +862,6 @@ fs_initcall(clock_init); static unsigned long sparc64_init_timers(void) { struct device_node *dp; - struct property *prop; unsigned long clock; #ifdef CONFIG_SMP extern void smp_tick_init(void); @@ -879,17 +878,15 @@ static unsigned long sparc64_init_timers(void) if (manuf == 0x17 && impl == 0x13) { /* Hummingbird, aka Ultra-IIe */ tick_ops = &hbtick_operations; - prop = of_find_property(dp, "stick-frequency", NULL); + clock = of_getintprop_default(dp, "stick-frequency", 0); } else { tick_ops = &tick_operations; - cpu_find_by_instance(0, &dp, NULL); - prop = of_find_property(dp, "clock-frequency", NULL); + clock = local_cpu_data().clock_tick; } } else { tick_ops = &stick_operations; - prop = of_find_property(dp, "stick-frequency", NULL); + clock = of_getintprop_default(dp, "stick-frequency", 0); } - clock = *(unsigned int *) prop->value; #ifdef CONFIG_SMP smp_tick_init(); diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index d0fde36395b4..00a9e3286c83 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector void __init cheetah_ecache_flush_init(void) { unsigned long largest_size, smallest_linesize, order, ver; - struct device_node *dp; - int i, instance, sz; + int i, sz; /* Scan all cpu device tree nodes, note two values: * 1) largest E-cache size @@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void) largest_size = 0UL; smallest_linesize = ~0UL; - instance = 0; - while (!cpu_find_by_instance(instance, &dp, NULL)) { + for (i = 0; i < NR_CPUS; i++) { unsigned long val; - val = of_getintprop_default(dp, "ecache-size", - (2 * 1024 * 1024)); + val = cpu_data(i).ecache_size; + if (!val) + continue; + if (val > largest_size) largest_size = val; - val = of_getintprop_default(dp, "ecache-line-size", 64); + + val = cpu_data(i).ecache_line_size; if (val < smallest_linesize) smallest_linesize = val; - instance++; + } if (largest_size == 0UL || smallest_linesize == ~0UL) { @@ -2564,7 +2565,15 @@ void __init trap_init(void) (TRAP_PER_CPU_TSB_HUGE_TEMP != offsetof(struct trap_per_cpu, tsb_huge_temp)) || (TRAP_PER_CPU_IRQ_WORKLIST != - offsetof(struct trap_per_cpu, irq_worklist))) + offsetof(struct trap_per_cpu, irq_worklist)) || + (TRAP_PER_CPU_CPU_MONDO_QMASK != + offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || + (TRAP_PER_CPU_DEV_MONDO_QMASK != + offsetof(struct trap_per_cpu, dev_mondo_qmask)) || + (TRAP_PER_CPU_RESUM_QMASK != + offsetof(struct trap_per_cpu, resum_qmask)) || + (TRAP_PER_CPU_NONRESUM_QMASK != + offsetof(struct trap_per_cpu, nonresum_qmask))) trap_per_cpu_offsets_are_bolixed_dave(); if ((TSB_CONFIG_TSB != diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 0c9995c3b8ed..977698269d3a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -23,6 +23,7 @@ #include <linux/kprobes.h> #include <linux/cache.h> #include <linux/sort.h> +#include <linux/percpu.h> #include <asm/head.h> #include <asm/system.h> @@ -44,8 +45,7 @@ #include <asm/hypervisor.h> #include <asm/prom.h> #include <asm/sstate.h> - -extern void device_scan(void); +#include <asm/mdesc.h> #define MAX_PHYS_ADDRESS (1UL << 42UL) #define 
KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) @@ -1335,6 +1335,9 @@ void __cpuinit sun4v_ktsb_register(void) extern void cheetah_ecache_flush_init(void); extern void sun4v_patch_tlb_handlers(void); +extern void cpu_probe(void); +extern void central_probe(void); + static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; @@ -1419,8 +1422,13 @@ void __init paging_init(void) kernel_physical_mapping_init(); + real_setup_per_cpu_areas(); + prom_build_devicetree(); + if (tlb_type == hypervisor) + sun4v_mdesc_init(); + { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; @@ -1437,7 +1445,10 @@ void __init paging_init(void) zholes_size); } - device_scan(); + prom_printf("Booting Linux...\n"); + + central_probe(); + cpu_probe(); } static void __init taint_real_pages(void) diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index e89922d6718c..f321b1d21227 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -17,11 +17,11 @@ typedef struct { /* Dcache line 1 */ unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ - unsigned int __pad0_1; - unsigned int __pad0_2; - unsigned int __pad1; + unsigned int __pad0; unsigned long clock_tick; /* %tick's per second */ unsigned long udelay_val; + unsigned int __pad1; + unsigned int __pad2; /* Dcache line 2, rarely used */ unsigned int dcache_size; @@ -30,8 +30,8 @@ typedef struct { unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; + int core_id; unsigned int __pad3; - unsigned int __pad4; } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); @@ -76,12 +76,18 @@ struct trap_per_cpu { /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ unsigned int irq_worklist; - unsigned int __pad1; - unsigned long __pad2[3]; + unsigned int cpu_mondo_qmask; + unsigned int dev_mondo_qmask; + unsigned int resum_qmask; + unsigned int nonresum_qmask; + unsigned int __pad2[3]; } __attribute__((aligned(64))); extern struct trap_per_cpu trap_block[NR_CPUS]; extern void init_cur_cpu_trap(struct thread_info *); extern void setup_tba(void); +extern int ncpus_probed; + +extern unsigned long real_hard_smp_processor_id(void); struct cpuid_patch_entry { unsigned int addr; @@ -122,6 +128,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, #define TRAP_PER_CPU_TSB_HUGE 0xd0 #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 #define TRAP_PER_CPU_IRQ_WORKLIST 0xe0 +#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4 +#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8 +#define TRAP_PER_CPU_RESUM_QMASK 0xec +#define TRAP_PER_CPU_NONRESUM_QMASK 0xf0 #define TRAP_BLOCK_SZ_SHIFT 8 diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 17b233b64aff..0a241c82fc7b 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -120,6 +120,11 @@ */ #define HV_FAST_MACH_DESC 0x01 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_mach_desc(unsigned long buffer_pa, unsigned long buf_len, + unsigned long *real_buf_len); +#endif + /* mach_exit() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_MACH_SIR diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h new file mode 100644 index 000000000000..124eb8ca2378 --- /dev/null +++ b/include/asm-sparc64/mdesc.h @@ -0,0 +1,39 @@ +#ifndef _SPARC64_MDESC_H +#define _SPARC64_MDESC_H + +#include <linux/types.h> +#include <asm/prom.h> + +struct mdesc_node; +struct mdesc_arc { + const char *name; + struct mdesc_node *arc; +}; + +struct mdesc_node { + const 
char *name; + u64 node; + unsigned int unique_id; + unsigned int num_arcs; + struct property *properties; + struct mdesc_node *hash_next; + struct mdesc_node *allnodes_next; + struct mdesc_arc arcs[0]; +}; + +extern struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, + const char *name); +#define md_for_each_node_by_name(__mn, __name) \ + for (__mn = md_find_node_by_name(NULL, __name); __mn; \ + __mn = md_find_node_by_name(__mn, __name)) + +extern struct property *md_find_property(const struct mdesc_node *mp, + const char *name, + int *lenp); +extern const void *md_get_property(const struct mdesc_node *mp, + const char *name, + int *lenp); + +extern void sun4v_mdesc_init(void); + +#endif diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index 07275e2366a3..992f9f7a476c 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -319,11 +319,6 @@ extern int prom_inst2pkg(int); extern int prom_service_exists(const char *service_name); extern void prom_sun4v_guest_soft_state(void); -/* CPU probing helpers. */ -struct device_node; -int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid); -int cpu_find_by_mid(int mid, struct device_node **prom_node); - /* Client interface level routines. */ extern void prom_set_trap_table(unsigned long tba); extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa); diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index ced8cbde046d..88db872ce2f8 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -5,7 +5,8 @@ #ifdef CONFIG_SMP -extern void setup_per_cpu_areas(void); +#define setup_per_cpu_areas() do { } while (0) +extern void real_setup_per_cpu_areas(void); extern unsigned long __per_cpu_base; extern unsigned long __per_cpu_shift; @@ -34,6 +35,7 @@ do { \ } while (0) #else /* ! 
SMP */ +#define real_setup_per_cpu_areas() do { } while (0) #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h index ddad5f99ac7f..b4df3042add0 100644 --- a/include/asm-sparc64/prom.h +++ b/include/asm-sparc64/prom.h @@ -90,6 +90,7 @@ extern struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compat); extern struct device_node *of_find_node_by_path(const char *path); extern struct device_node *of_find_node_by_phandle(phandle handle); +extern struct device_node *of_find_node_by_cpuid(int cpuid); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h index 869d16fb907b..f76e1492add5 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -41,7 +41,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS]; extern int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) -extern void smp_setup_cpu_possible_map(void); +extern void smp_fill_in_sib_core_maps(void); extern unsigned char boot_cpu_id; #endif /* !(__ASSEMBLY__) */ @@ -49,7 +49,7 @@ extern unsigned char boot_cpu_id; #else #define hard_smp_processor_id() 0 -#define smp_setup_cpu_possible_map() do { } while (0) +#define smp_fill_in_sib_core_maps() do { } while (0) #define boot_cpu_id (0) #endif /* !(CONFIG_SMP) */ diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h index 98a6c613589d..e0d450d600ec 100644 --- a/include/asm-sparc64/topology.h +++ b/include/asm-sparc64/topology.h @@ -6,4 +6,7 @@ #include <asm-generic/topology.h> +#define topology_core_id(cpu) (cpu_data(cpu).core_id) +#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) + #endif /* _ASM_SPARC64_TOPOLOGY_H */ |
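
As a quick reference, the sketch below is a standalone userspace C program (not part of the patch) that mirrors the queue-sizing arithmetic this commit introduces in get_one_mondo_bits() and register_one_mondo(): a machine-description "q-*-#bits" property of N means the sun4v mondo queue holds (1 << N) entries of 64 bytes each, so the head/tail wrap mask is (1 << N) * 64 - 1 and the entry count handed to sun4v_cpu_qconf() is (qmask + 1) / 64. The property value and default used here are example inputs only.

```c
/* Standalone illustration of the sun4v mondo queue sizing used in the patch.
 * Build with any C compiler; it does not touch kernel interfaces.
 */
#include <stdio.h>

/* Mirror of get_one_mondo_bits(): fall back to a default bit count when the
 * machine-description property is absent or out of range, then convert the
 * bit count into a byte-offset wrap mask (64 bytes per queue entry).
 */
static unsigned int mondo_qmask(const unsigned long *prop, unsigned char def_bits)
{
	unsigned long bits = (prop && *prop && *prop < 64) ? *prop : def_bits;

	return ((1U << bits) * 64U) - 1U;
}

int main(void)
{
	unsigned long cpu_mondo_bits = 7;            /* example MD property value */
	unsigned int qmask = mondo_qmask(&cpu_mondo_bits, 7);
	unsigned long num_entries = (qmask + 1) / 64; /* what register_one_mondo() computes */

	/* With 7 bits this reproduces the old hard-coded values:
	 * 8192-byte queue, mask 0x1fff, 128 entries.
	 */
	printf("qmask=0x%x bytes=%u entries=%lu\n", qmask, qmask + 1, num_entries);
	return 0;
}
```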