author		Grant Likely <grant.likely@secretlab.ca>	2010-07-24 09:49:13 -0600
committer	Grant Likely <grant.likely@secretlab.ca>	2010-07-24 09:49:13 -0600
commit		4e4f62bf7396fca48efe61513640ee399a6046e3 (patch)
tree		42a503af02d9806bcc05e5fcc2cd53f9bd45b0c2 /arch/powerpc/kernel
parent		9e3288dc9a94fab5ea87db42177d3a9e0345a614 (diff)
parent		b37fa16e78d6f9790462b3181602a26b5af36260 (diff)
Merge commit 'v2.6.35-rc6' into devicetree/next
Conflicts:
arch/sparc/kernel/prom_64.c
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/btext.c	2
-rw-r--r--	arch/powerpc/kernel/crash.c	4
-rw-r--r--	arch/powerpc/kernel/crash_dump.c	4
-rw-r--r--	arch/powerpc/kernel/dma-swiotlb.c	2
-rw-r--r--	arch/powerpc/kernel/dma.c	4
-rw-r--r--	arch/powerpc/kernel/fsl_booke_entry_mapping.S	4
-rw-r--r--	arch/powerpc/kernel/irq.c	17
-rw-r--r--	arch/powerpc/kernel/machine_kexec.c	12
-rw-r--r--	arch/powerpc/kernel/machine_kexec_64.c	18
-rw-r--r--	arch/powerpc/kernel/misc_32.S	2
-rw-r--r--	arch/powerpc/kernel/misc_64.S	2
-rw-r--r--	arch/powerpc/kernel/paca.c	8
-rw-r--r--	arch/powerpc/kernel/perf_event.c	5
-rw-r--r--	arch/powerpc/kernel/process.c	5
-rw-r--r--	arch/powerpc/kernel/prom.c	62
-rw-r--r--	arch/powerpc/kernel/prom_init.c	2
-rw-r--r--	arch/powerpc/kernel/prom_init_check.sh	6
-rw-r--r--	arch/powerpc/kernel/rtas.c	6
-rw-r--r--	arch/powerpc/kernel/rtas_flash.c	39
-rw-r--r--	arch/powerpc/kernel/setup-common.c	2
-rw-r--r--	arch/powerpc/kernel/setup_32.c	20
-rw-r--r--	arch/powerpc/kernel/setup_64.c	24
-rw-r--r--	arch/powerpc/kernel/vdso.c	4
23 files changed, 111 insertions, 143 deletions
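
Nearly all of the hunks below are the mechanical lmb -> memblock rename pulled in from v2.6.35-rc6. As a reading aid, here is a minimal sketch of that rename pattern, built only from calls that appear in the hunks; the helper name and its clamping logic are illustrative assumptions, not code from this commit.

/* Sketch only: the lmb -> memblock substitution pattern seen below. */
#include <linux/init.h>
#include <linux/memblock.h>	/* was <linux/lmb.h> */

/* Hypothetical early-boot helper: clamp a region to DRAM, then reserve it. */
static void __init example_reserve_region(u64 base, u64 size)
{
	/* was lmb_analyze(): recompute totals before querying sizes */
	memblock_analyze();

	/* was lmb_end_of_DRAM(): top of memory known to memblock */
	if (base >= memblock_end_of_DRAM())
		return;
	if (base + size > memblock_end_of_DRAM())
		size = memblock_end_of_DRAM() - base;

	/* was lmb_reserve(): keep the range away from later allocators */
	memblock_reserve(base, size);
}

The same one-for-one substitution covers memblock_alloc(), memblock_alloc_base(), memblock_phys_mem_size() and the memblock.rmo_size field in the hunks that follow.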
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 26e58630ed7b..625942ae5585 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/prom.h>
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index b46f2e09bd81..417f7b05a9ce 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -24,7 +24,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
@@ -447,7 +447,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	crash_kexec_stop_spus();
-#ifdef CONFIG_PPC_STD_MMU_64
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
 #endif
 	if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5fb667a60894..40f524643ba6 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -13,7 +13,7 @@
 
 #include <linux/crash_dump.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
-	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
 }
 
 static void __init create_trampoline(unsigned long addr)
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e7fe218b8697..02f724f36753 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
 	sd->max_direct_dma_addr = 0;
 
 	/* May need to bounce if the device can't address all of DRAM */
-	if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
+	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
 		set_dma_ops(dev, &swiotlb_dma_ops);
 
 	return NOTIFY_DONE;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8d1de6f31d5a..84d6367ec003 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -9,7 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/gfp.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/bug.h>
 #include <asm/abs_addr.h>
@@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 	/* Could be improved so platforms can set the limit in case
 	 * they have limited DMA windows
 	 */
-	return mask >= (lmb_end_of_DRAM() - 1);
+	return mask >= (memblock_end_of_DRAM() - 1);
 #else
 	return 1;
 #endif
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index beb4d78a2304..a92c79be2728 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -205,8 +205,7 @@ next_tlb_setup:
 	bdnz+	next_tlb_setup
 
 /* 7. Jump to our 1:1 mapping */
-	li	r6, 0
-
+	mr	r6, r25
 #else
 	#error You need to specify the mapping or not use this at all.
 #endif
@@ -217,7 +216,6 @@ next_tlb_setup:
 1:	mflr	r9
 	rlwimi	r6,r9,0,20,31
 	addi	r6,r6,(2f - 1b)
-	add	r6, r6, r25
 	mtspr	SPRN_SRR0,r6
 	mtspr	SPRN_SRR1,r7
 	rfi				/* start execution out of TLB1[0] entry */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2676ef288bf5..60d39b7414c0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -297,7 +297,10 @@ void fixup_irqs(const struct cpumask *map)
 
 	for_each_irq(irq) {
 		desc = irq_to_desc(irq);
-		if (desc && desc->status & IRQ_PER_CPU)
+		if (!desc)
+			continue;
+
+		if (desc->status & IRQ_PER_CPU)
 			continue;
 
 		cpumask_and(mask, desc->affinity, map);
@@ -319,7 +322,6 @@
 	}
 #endif
 
-#ifdef CONFIG_IRQSTACKS
 static inline void handle_one_irq(unsigned int irq)
 {
 	struct thread_info *curtp, *irqtp;
@@ -360,12 +362,6 @@ static inline void handle_one_irq(unsigned int irq)
 	if (irqtp->flags)
 		set_bits(irqtp->flags, &curtp->flags);
 }
-#else
-static inline void handle_one_irq(unsigned int irq)
-{
-	generic_handle_irq(irq);
-}
-#endif
 
 static inline void check_stack_overflow(void)
 {
@@ -457,7 +453,6 @@ void exc_lvl_ctx_init(void)
 }
 #endif
 
-#ifdef CONFIG_IRQSTACKS
 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
 
@@ -494,10 +489,6 @@ static inline void do_softirq_onstack(void)
 	irqtp->task = NULL;
 }
 
-#else
-#define do_softirq_onstack()	__do_softirq()
-#endif /* CONFIG_IRQSTACKS */
-
 void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bb3d893a8353..89f005116aac 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -12,7 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/reboot.h>
 #include <linux/threads.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -66,11 +66,11 @@ void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	/* this is necessary because of lmb_phys_mem_size() */
-	lmb_analyze();
+	/* this is necessary because of memblock_phys_mem_size() */
+	memblock_analyze();
 
 	/* use common parsing */
-	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 			&crash_size, &crash_base);
 	if (ret == 0 && crash_size > 0) {
 		crashk_res.start = crash_base;
@@ -133,9 +133,9 @@
 			"for crashkernel (System RAM: %ldMB)\n",
 			(unsigned long)(crash_size >> 20),
 			(unsigned long)(crashk_res.start >> 20),
-			(unsigned long)(lmb_phys_mem_size() >> 20));
+			(unsigned long)(memblock_phys_mem_size() >> 20));
 
-	lmb_reserve(crashk_res.start, crash_size);
+	memblock_reserve(crashk_res.start, crash_size);
 }
 
 int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 26f9900f773c..ed31a29c4ff7 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -182,28 +182,12 @@ static void kexec_prepare_cpus_wait(int wait_state)
 
 	my_cpu = get_cpu();
 	/* Make sure each CPU has atleast made it to the state we need */
-	for (i=0; i < NR_CPUS; i++) {
+	for_each_online_cpu(i) {
 		if (i == my_cpu)
 			continue;
 
 		while (paca[i].kexec_state < wait_state) {
 			barrier();
-			if (!cpu_possible(i)) {
-				printk("kexec: cpu %d hw_cpu_id %d is not"
-					" possible, ignoring\n",
-					i, paca[i].hw_cpu_id);
-				break;
-			}
-			if (!cpu_online(i)) {
-				/* Fixme: this can be spinning in
-				 * pSeries_secondary_wait with a paca
-				 * waiting for it to go online.
-				 */
-				printk("kexec: cpu %d hw_cpu_id %d is not"
-					" online, ignoring\n",
-					i, paca[i].hw_cpu_id);
-				break;
-			}
 			if (i != notified) {
 				printk( "kexec: waiting for cpu %d (physical"
 						" %d) to enter %i state\n",
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index dc66d52dcff5..6bbd7a604d24 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -33,7 +33,6 @@
 
 	.text
 
-#ifdef CONFIG_IRQSTACKS
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
@@ -56,7 +55,6 @@ _GLOBAL(call_handle_irq)
 	lwz	r0,4(r1)
 	mtlr	r0
 	blr
-#endif /* CONFIG_IRQSTACKS */
 
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a2b18dffa03e..e5144906a56d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -28,7 +28,6 @@
 
 	.text
 
-#ifdef CONFIG_IRQSTACKS
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	std	r0,16(r1)
@@ -52,7 +51,6 @@ _GLOBAL(call_handle_irq)
 	ld	r0,16(r1)
 	mtlr	r0
 	blr
-#endif /* CONFIG_IRQSTACKS */
 
 	.section	".toc","aw"
 PPC64_CACHES:
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f88acf0218db..139a773853f4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -9,7 +9,7 @@
 
 #include <linux/threads.h>
 #include <linux/module.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/firmware.h>
 #include <asm/lppaca.h>
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, lmb.rmo_size);
+	limit = min(0x10000000ULL, memblock.rmo_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
@@ -128,7 +128,7 @@
 
 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
 
-	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
+	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
 	memset(paca, 0, paca_size);
 
 	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
@@ -148,7 +148,7 @@ void __init free_unused_pacas(void)
 	if (new_size >= paca_size)
 		return;
 
-	lmb_free(__pa(paca) + new_size, paca_size - new_size);
+	memblock_free(__pa(paca) + new_size, paca_size - new_size);
 
 	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
 		paca_size - new_size);
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 43b83c35cf54..5c14ffe51258 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -791,8 +791,11 @@ static void power_pmu_disable(struct perf_event *event)
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	for (i = 0; i < cpuhw->n_events; ++i) {
 		if (event == cpuhw->event[i]) {
-			while (++i < cpuhw->n_events)
+			while (++i < cpuhw->n_events) {
 				cpuhw->event[i-1] = cpuhw->event[i];
+				cpuhw->events[i-1] = cpuhw->events[i];
+				cpuhw->flags[i-1] = cpuhw->flags[i];
+			}
 			--cpuhw->n_events;
 			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
 			if (event->hw.idx) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9d255b4f0a0e..773424df828a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1005,7 +1005,6 @@ out:
 	return error;
 }
 
-#ifdef CONFIG_IRQSTACKS
 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 				  unsigned long nbytes)
 {
@@ -1030,10 +1029,6 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 	return 0;
 }
 
-#else
-#define valid_irq_stack(sp, p, nb)	0
-#endif /* CONFIG_IRQSTACKS */
-
 int validate_sp(unsigned long sp, struct task_struct *p,
 		unsigned long nbytes)
 {
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05131d634e73..9d3953983fb7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -31,7 +31,7 @@
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -98,7 +98,7 @@ static void __init move_device_tree(void)
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
+		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 {
 	__be32 *dm, *ls, *usm;
 	unsigned long l, n, flags;
-	u64 base, size, lmb_size;
+	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
-	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
-	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
 	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
 	if (dm == NULL || l < sizeof(__be32))
@@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 		   or if the block is not assigned to this partition (0x8) */
 		if ((flags & 0x80) || !(flags & 0x8))
 			continue;
-		size = lmb_size;
+		size = memblock_size;
 		rngs = 1;
 		if (is_kexec_kdump) {
 			/*
-			 * For each lmb in ibm,dynamic-memory, a corresponding
+			 * For each memblock in ibm,dynamic-memory, a corresponding
 			 * entry in linux,drconf-usable-memory property contains
 			 * a counter 'p' followed by 'p' (base, size) duple.
 			 * Now read the counter from
@@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 				if ((base + size) > 0x80000000ul)
 					size = 0x80000000ul - base;
 			}
-			lmb_add(base, size);
+			memblock_add(base, size);
 		} while (--rngs);
 	}
-	lmb_dump_all();
+	memblock_dump_all();
 	return 0;
 }
 #else
@@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif
 
-	lmb_add(base, size);
+	memblock_add(base, size);
 
 	memstart_addr = min((u64)memstart_addr, base);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-	return lmb_alloc(size, align);
+	return memblock_alloc(size, align);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -534,12 +534,12 @@ static void __init early_reserve_mem(void)
 	/* before we do anything, lets reserve the dt blob */
 	self_base = __pa((unsigned long)initial_boot_params);
 	self_size = initial_boot_params->totalsize;
-	lmb_reserve(self_base, self_size);
+	memblock_reserve(self_base, self_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* then reserve the initrd, if any */
 	if (initrd_start && (initrd_end > initrd_start))
-		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
+		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
@@ -560,7 +560,7 @@ static void __init early_reserve_mem(void)
 			if (base_32 == self_base && size_32 == self_size)
 				continue;
 			DBG("reserving: %x -> %x\n", base_32, size_32);
-			lmb_reserve(base_32, size_32);
+			memblock_reserve(base_32, size_32);
 		}
 		return;
 	}
@@ -571,7 +571,7 @@ static void __init early_reserve_mem(void)
 		if (size == 0)
 			break;
 		DBG("reserving: %llx -> %llx\n", base, size);
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);
 	}
 }
 
@@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void)
 		return phyp_dump_info->reserve_bootvar;
 
 	/* divide by 20 to get 5% of value */
-	tmp = lmb_end_of_DRAM();
+	tmp = memblock_end_of_DRAM();
 	do_div(tmp, 20);
 
 	/* round it down in multiples of 256 */
@@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void)
 	if (phyp_dump_info->phyp_dump_is_active) {
 		/* Reserve *everything* above RMR.Area freed by userland tools*/
 		base = variable_reserve_size;
-		size = lmb_end_of_DRAM() - base;
+		size = memblock_end_of_DRAM() - base;
 
 		/* XXX crashed_ram_end is wrong, since it may be beyond
 		 * the memory_limit, it will need to be adjusted. */
-		lmb_reserve(base, size);
+		memblock_reserve(base, size);
 
 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
@@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void)
 		size = phyp_dump_info->cpu_state_size +
 			phyp_dump_info->hpte_region_size +
 			variable_reserve_size;
-		base = lmb_end_of_DRAM() - size;
-		lmb_reserve(base, size);
+		base = memblock_end_of_DRAM() - size;
+		memblock_reserve(base, size);
 		phyp_dump_info->init_reserve_start = base;
 		phyp_dump_info->init_reserve_size = size;
 	}
@@ -681,8 +681,8 @@ void __init early_init_devtree(void *params)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
 
-	/* Scan memory nodes and rebuild LMBs */
-	lmb_init();
+	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -690,11 +690,11 @@ void __init early_init_devtree(void *params)
 	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 	parse_early_param();
 
-	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
-	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
+	memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
 	/* If relocatable, reserve first 32k for interrupt vectors etc. */
 	if (PHYSICAL_START > MEMORY_START)
-		lmb_reserve(MEMORY_START, 0x8000);
+		memblock_reserve(MEMORY_START, 0x8000);
 	reserve_kdump_trampoline();
 	reserve_crashkernel();
 	early_reserve_mem();
@@ -706,17 +706,17 @@ void __init early_init_devtree(void *params)
 		/* Ensure that total memory size is page-aligned, because
 		 * otherwise mark_bootmem() gets upset.
 		 */
-		lmb_analyze();
-		memsize = lmb_phys_mem_size();
+		memblock_analyze();
+		memsize = memblock_phys_mem_size();
 		if ((memsize & PAGE_MASK) != memsize)
 			limit = memsize & PAGE_MASK;
 	}
-	lmb_enforce_memory_limit(limit);
+	memblock_enforce_memory_limit(limit);
 
-	lmb_analyze();
-	lmb_dump_all();
+	memblock_analyze();
+	memblock_dump_all();
 
-	DBG("Phys. mem: %llx\n", lmb_phys_mem_size());
+	DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
 
 	/* We may need to relocate the flat tree, do it now.
 	 * FIXME .. and the initrd too? */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 97d4bd9442d3..3b6f8ae9b8cc 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -872,7 +872,7 @@ static void __init prom_send_capabilities(void)
 			"ibm_architecture_vec structure inconsistent: 0x%x !\n",
 			*cores);
 	} else {
-		*cores = NR_CPUS / prom_count_smt_threads();
+		*cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
 		prom_printf("Max number of cores passed to firmware: 0x%x\n",
 			    (unsigned long)*cores);
 	}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 1ac136b128f0..9f82f4937892 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -52,12 +52,18 @@ do
 	if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
 		OK=1
 	fi
+	if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then
+		OK=1
+	fi
 	if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
 		OK=1
 	fi
 	if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
 		OK=1
 	fi
+	if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then
+		OK=1
+	fi
 	if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
 		OK=1
 	fi
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 0e1ec6f746f6..d0516dbee762 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -22,7 +22,7 @@
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/prom.h>
@@ -934,11 +934,11 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
-	rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
 #ifdef CONFIG_RTAS_ERROR_LOGGING
 	rtas_last_error_token = rtas_token("rtas-last-error");
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index bfc2abafac44..67a84d8f118d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -94,12 +94,8 @@ struct flash_block_list {
 	struct flash_block_list *next;
 	struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
 };
-struct flash_block_list_header { /* just the header of flash_block_list */
-	unsigned long num_blocks;
-	struct flash_block_list *next;
-};
 
-static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+static struct flash_block_list *rtas_firmware_flash_list;
 
 /* Use slab cache to guarantee 4k alignment */
 static struct kmem_cache *flash_block_cache = NULL;
@@ -108,13 +104,14 @@ static struct kmem_cache *flash_block_cache = NULL;
 
 /* Local copy of the flash block list.
  * We only allow one open of the flash proc file and create this
- * list as we go.  This list will be put in the
- * rtas_firmware_flash_list var once it is fully read.
+ * list as we go.  The rtas_firmware_flash_list varable will be
+ * set once the data is fully read.
  *
  * For convenience as we build the list we use virtual addrs,
  * we do not fill in the version number, and the length field
  * is treated as the number of entries currently in the block
- * (i.e. not a byte count).  This is all fixed on release.
+ * (i.e. not a byte count).  This is all fixed when calling
+ * the flash routine.
  */
 
 /* Status int must be first member of struct */
@@ -201,16 +198,16 @@ static int rtas_flash_release(struct inode *inode, struct file *file)
 	if (uf->flist) {
 		/* File was opened in write mode for a new flash attempt */
 		/* Clear saved list */
-		if (rtas_firmware_flash_list.next) {
-			free_flash_list(rtas_firmware_flash_list.next);
-			rtas_firmware_flash_list.next = NULL;
+		if (rtas_firmware_flash_list) {
+			free_flash_list(rtas_firmware_flash_list);
+			rtas_firmware_flash_list = NULL;
 		}
 
 		if (uf->status != FLASH_AUTH)
 			uf->status = flash_list_valid(uf->flist);
 
 		if (uf->status == FLASH_IMG_READY)
-			rtas_firmware_flash_list.next = uf->flist;
+			rtas_firmware_flash_list = uf->flist;
 		else
 			free_flash_list(uf->flist);
@@ -593,7 +590,7 @@ static void rtas_flash_firmware(int reboot_type)
 	unsigned long rtas_block_list;
 	int i, status, update_token;
 
-	if (rtas_firmware_flash_list.next == NULL)
+	if (rtas_firmware_flash_list == NULL)
 		return;		/* nothing to do */
 
 	if (reboot_type != SYS_RESTART) {
@@ -610,20 +607,25 @@ static void rtas_flash_firmware(int reboot_type)
 		return;
 	}
 
-	/* NOTE: the "first" block list is a global var with no data
-	 * blocks in the kernel data segment.  We do this because
-	 * we want to ensure this block_list addr is under 4GB.
+	/*
+	 * NOTE: the "first" block must be under 4GB, so we create
+	 * an entry with no data blocks in the reserved buffer in
+	 * the kernel data segment.
 	 */
-	rtas_firmware_flash_list.num_blocks = 0;
-	flist = (struct flash_block_list *)&rtas_firmware_flash_list;
+	spin_lock(&rtas_data_buf_lock);
+	flist = (struct flash_block_list *)&rtas_data_buf[0];
+	flist->num_blocks = 0;
+	flist->next = rtas_firmware_flash_list;
 	rtas_block_list = virt_to_abs(flist);
 	if (rtas_block_list >= 4UL*1024*1024*1024) {
 		printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
+		spin_unlock(&rtas_data_buf_lock);
 		return;
 	}
 
 	printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
 	/* Update the block_list in place. */
+	rtas_firmware_flash_list = NULL; /* too hard to backout on error */
 	image_size = 0;
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
@@ -664,6 +666,7 @@ static void rtas_flash_firmware(int reboot_type)
 		printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
 		break;
 	}
+	spin_unlock(&rtas_data_buf_lock);
 }
 
 static void remove_flash_pde(struct proc_dir_entry *dp)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5e4d852f640c..b7e6c7e193ae 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,7 +33,7 @@
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
 #include <linux/percpu.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of_platform.h>
 #include <asm/io.h>
 #include <asm/paca.h>
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 8f58986c2ad9..a10ffc85ada7 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,7 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -241,23 +241,19 @@ int __init ppc_init(void)
 
 arch_initcall(ppc_init);
 
-#ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
-#else
-#define irqstack_early_init()
-#endif
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 static void __init exc_lvl_early_init(void)
@@ -265,15 +261,15 @@ static void __init exc_lvl_early_init(void)
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
 	}
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f3fb5a79de52..d135f93cb0f6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to setup out MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
 	printk("-----------------------------------------------------\n");
 	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-	printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+	printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 		       ppc64_caches.dline_size);
@@ -432,7 +432,6 @@ static u64 slb0_limit(void)
 	return 1UL << SID_SHIFT;
 }
 
-#ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
 	u64 limit = slb0_limit();
@@ -444,16 +443,13 @@ static void __init irqstack_early_init(void)
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 		hardirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc_base(THREAD_SIZE,
+			__va(memblock_alloc_base(THREAD_SIZE,
 					    THREAD_SIZE, limit));
 	}
 }
-#else
-#define irqstack_early_init()
-#endif
 
 #ifdef CONFIG_PPC_BOOK3E
 static void __init exc_lvl_early_init(void)
@@ -462,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
 	for_each_possible_cpu(i) {
 		critirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		dbgirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 		mcheckirq_ctx[i] = (struct thread_info *)
-			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 	}
 }
 #else
@@ -491,11 +487,11 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), lmb.rmo_size);
+	limit = min(slb0_limit(), memblock.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
-		sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 		sp += THREAD_SIZE;
 		paca[i].emergency_sp = __va(sp);
 	}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d84d19224a95..13002fe206e7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -734,7 +734,7 @@ static int __init vdso_init(void)
 	vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		vdso_data->platform |= 1;
-	vdso_data->physicalMemorySize = lmb_phys_mem_size();
+	vdso_data->physicalMemorySize = memblock_phys_mem_size();
 	vdso_data->dcache_size = ppc64_caches.dsize;
 	vdso_data->dcache_line_size = ppc64_caches.dline_size;
 	vdso_data->icache_size = ppc64_caches.isize;