Diffstat (limited to 'arch/tile/kernel/setup.c')
-rw-r--r-- | arch/tile/kernel/setup.c | 162
1 file changed, 134 insertions, 28 deletions
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index eceb8344280f..4c34caea9dd3 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -154,6 +154,65 @@ static int __init setup_maxnodemem(char *str)
 }
 early_param("maxnodemem", setup_maxnodemem);
 
+struct memmap_entry {
+	u64 addr;	/* start of memory segment */
+	u64 size;	/* size of memory segment */
+};
+static struct memmap_entry memmap_map[64];
+static int memmap_nr;
+
+static void add_memmap_region(u64 addr, u64 size)
+{
+	if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
+		pr_err("Ooops! Too many entries in the memory map!\n");
+		return;
+	}
+	memmap_map[memmap_nr].addr = addr;
+	memmap_map[memmap_nr].size = size;
+	memmap_nr++;
+}
+
+static int __init setup_memmap(char *p)
+{
+	char *oldp;
+	u64 start_at, mem_size;
+
+	if (!p)
+		return -EINVAL;
+
+	if (!strncmp(p, "exactmap", 8)) {
+		pr_err("\"memmap=exactmap\" not valid on tile\n");
+		return 0;
+	}
+
+	oldp = p;
+	mem_size = memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	if (*p == '@') {
+		pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
+	} else if (*p == '#') {
+		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
+	} else if (*p == '$') {
+		start_at = memparse(p+1, &p);
+		add_memmap_region(start_at, mem_size);
+	} else {
+		if (mem_size == 0)
+			return -EINVAL;
+		maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
+			(HPAGE_SHIFT - PAGE_SHIFT);
+	}
+	return *p == '\0' ? 0 : -EINVAL;
+}
+early_param("memmap", setup_memmap);
+
+static int __init setup_mem(char *str)
+{
+	return setup_maxmem(str);
+}
+early_param("mem", setup_mem);  /* compatibility with x86 */
+
 static int __init setup_isolnodes(char *str)
 {
 	char buf[MAX_NUMNODES * 5];
@@ -209,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
 /*
  * Determine for each controller where its lowmem is mapped and how much of
  * it is mapped there.  On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * already mapped in as code at MEM_SV_START, so in principle we could
  * start our data mappings higher up, but for now we don't bother, to avoid
  * additional confusion.
  *
@@ -614,11 +673,12 @@ static void __init setup_bootmem_allocator_node(int i)
 	/*
 	 * Throw away any memory aliased by the PCI region.
 	 */
-	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
-		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
-				PFN_PHYS(pci_reserve_end_pfn -
-					 pci_reserve_start_pfn),
+	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
+		start = max(pci_reserve_start_pfn, start);
+		end = min(pci_reserve_end_pfn, end);
+		reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
 				BOOTMEM_EXCLUSIVE);
+	}
 #endif
 }
 
@@ -628,6 +688,31 @@ static void __init setup_bootmem_allocator(void)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		setup_bootmem_allocator_node(i);
 
+	/* Reserve any memory excluded by "memmap" arguments. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		reserve_bootmem(m->addr, m->size, 0);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start) {
+		/* Make sure the initrd memory region is not modified. */
+		if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
+				    BOOTMEM_EXCLUSIVE)) {
+			pr_crit("The initrd memory region has been polluted. Disabling it.\n");
+			initrd_start = 0;
+			initrd_end = 0;
+		} else {
+			/*
+			 * Translate initrd_start & initrd_end from PA to VA for
+			 * future access.
+			 */
+			initrd_start += PAGE_OFFSET;
+			initrd_end += PAGE_OFFSET;
+		}
+	}
+#endif
+
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
 		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
@@ -961,9 +1046,6 @@ void setup_cpu(int boot)
 	arch_local_irq_unmask(INT_DMATLB_MISS);
 	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
-#if CHIP_HAS_SN_PROC()
-	arch_local_irq_unmask(INT_SNITLB_MISS);
-#endif
 #ifdef __tilegx__
 	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
@@ -978,10 +1060,6 @@ void setup_cpu(int boot)
 	/* Static network is not restricted. */
 	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
 #endif
-#if CHIP_HAS_SN_PROC()
-	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
-	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
-#endif
 
 	/*
 	 * Set the MPL for interrupt control 0 & 1 to the corresponding
@@ -1029,6 +1107,10 @@ static void __init load_hv_initrd(void)
 	int fd, rc;
 	void *initrd;
 
+	/* If initrd has already been set, skip initramfs file in hvfs. */
+	if (initrd_start)
+		return;
+
 	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
 	if (fd == HV_ENOENT) {
 		if (set_initramfs_file) {
@@ -1067,6 +1149,25 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
 	free_bootmem(__pa(begin), end - begin);
 }
 
+static int __init setup_initrd(char *str)
+{
+	char *endp;
+	unsigned long initrd_size;
+
+	initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
+	if (initrd_size == 0 || *endp != '@')
+		return -EINVAL;
+
+	initrd_start = simple_strtoul(endp+1, &endp, 0);
+	if (initrd_start == 0)
+		return -EINVAL;
+
+	initrd_end = initrd_start + initrd_size;
+
+	return 0;
+}
+early_param("initrd", setup_initrd);
+
 #else
 static inline void load_hv_initrd(void) {}
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -1134,7 +1235,7 @@ static void __init validate_va(void)
 #ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
 	/*
 	 * Similarly, make sure we're only using allowed VAs.
-	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
+	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
 	 * and 0 .. KERNEL_HIGH_VADDR.
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
@@ -1149,7 +1250,7 @@ static void __init validate_va(void)
 		if (range.size == 0)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
-		    range.start + range.size >= MEM_HV_INTRPT)
+		    range.start + range.size >= MEM_HV_START)
 			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
@@ -1183,7 +1284,6 @@ static void __init validate_va(void)
 struct cpumask __write_once cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /*
  * hash_for_home_map lists all the tiles that hash-for-home data
  * will be cached on.  Note that this may includes tiles that are not
@@ -1193,7 +1293,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
  */
 struct cpumask hash_for_home_map;
 EXPORT_SYMBOL(hash_for_home_map);
-#endif
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
@@ -1286,7 +1385,6 @@ static void __init setup_cpu_maps(void)
 		cpu_lotar_map = *cpu_possible_mask;
 	}
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 	/* Retrieve set of CPUs used for hash-for-home caching */
 	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
 			      (HV_VirtAddr) hash_for_home_map.bits,
@@ -1294,9 +1392,6 @@
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
 	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-#else
-	cpu_cacheable_map = *cpu_possible_mask;
-#endif
 }
 
@@ -1492,7 +1587,7 @@ void __init setup_per_cpu_areas(void)
 
 		/* Update the vmalloc mapping and page home. */
 		unsigned long addr = (unsigned long)ptr + i;
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1501,12 +1596,12 @@ void __init setup_per_cpu_areas(void)
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-		ptep = virt_to_pte(NULL, lowmem_va);
+		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
 			printk(KERN_DEBUG "early shatter of huge page"
 			       " at %#lx\n", lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
@@ -1548,6 +1643,8 @@ insert_non_bus_resource(void)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
+	if (!res)
+		return NULL;
 	res->name = "Non-Bus Physical Address Space";
 	res->start = (1ULL << 32);
 	res->end = -1LL;
@@ -1561,11 +1658,13 @@ insert_non_bus_resource(void)
 #endif
 
 static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn)
+insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
-	res->name = "System RAM";
+	if (!res)
+		return NULL;
+	res->name = reserved ? "Reserved" : "System RAM";
 	res->start = start_pfn << PAGE_SHIFT;
 	res->end = (end_pfn << PAGE_SHIFT) - 1;
 	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
@@ -1585,7 +1684,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn)
 static int __init request_standard_resources(void)
 {
 	int i;
-	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
 #if defined(CONFIG_PCI) && !defined(__tilegx__)
 	insert_non_bus_resource();
@@ -1600,11 +1699,11 @@ static int __init request_standard_resources(void)
 		    end_pfn > pci_reserve_start_pfn) {
 			if (end_pfn > pci_reserve_end_pfn)
 				insert_ram_resource(pci_reserve_end_pfn,
-						    end_pfn);
+						    end_pfn, 0);
 			end_pfn = pci_reserve_start_pfn;
 		}
 #endif
-		insert_ram_resource(start_pfn, end_pfn);
+		insert_ram_resource(start_pfn, end_pfn, 0);
 	}
 
 	code_resource.start = __pa(_text - CODE_DELTA);
@@ -1615,6 +1714,13 @@ static int __init request_standard_resources(void)
 	insert_resource(&iomem_resource, &code_resource);
 	insert_resource(&iomem_resource, &data_resource);
 
+	/* Mark any "memmap" regions busy for the resource manager. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		insert_ram_resource(PFN_DOWN(m->addr),
+				    PFN_UP(m->addr + m->size - 1), 1);
+	}
+
#ifdef CONFIG_KEXEC
 	insert_resource(&iomem_resource, &crashk_res);
 #endif
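For reference, the boot arguments handled by the early_param() hooks added above could look something like the following; the addresses and sizes are illustrative values only, not taken from the patch:

	memmap=512M$0x40000000     exclude 512MB at PA 0x40000000 from kernel use (recorded by add_memmap_region() and reserved at boot)
	mem=2G                     cap usable memory, same effect as maxmem=2G
	initrd=0xf00000@0x1000000  size@physical-address of an initrd already loaded by the boot loader

Per setup_memmap() above, "memmap=exactmap", "memmap=nn@ss" and "memmap=nn#ss" are rejected on tile, and a plain "memmap=nn" behaves like "maxmem". Note that setup_initrd() parses its argument with simple_strtoul(), so the initrd size and address are given as plain decimal or hex numbers rather than with K/M/G suffixes.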