Diffstat (limited to 'arch/sparc/mm')

 arch/sparc/mm/fault_32.c    |  15
 arch/sparc/mm/fault_64.c    |  11
 arch/sparc/mm/gup.c         |  41
 arch/sparc/mm/hugetlbpage.c |  14
 arch/sparc/mm/init_32.c     | 132
 arch/sparc/mm/init_64.c     | 105
 arch/sparc/mm/srmmu.c       |  32
 arch/sparc/mm/tlb.c         |   2
 arch/sparc/mm/tsb.c         |  21
 9 files changed, 247 insertions(+), 126 deletions(-)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index a8103a84b4ac..b0440b0edd97 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -127,19 +127,11 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                unsigned long addr)
 {
-        siginfo_t info;
-
-        info.si_signo = sig;
-        info.si_code = code;
-        info.si_errno = 0;
-        info.si_addr = (void __user *) addr;
-        info.si_trapno = 0;
-
         if (unlikely(show_unhandled_signals))
-                show_signal_msg(regs, sig, info.si_code,
+                show_signal_msg(regs, sig, code,
                                 addr, current);
 
-        force_sig_info (sig, &info, current);
+        force_sig_fault(sig, code, (void __user *) addr, 0, current);
 }
 
 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
@@ -174,7 +166,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         unsigned int fixup;
         unsigned long g2;
         int from_user = !(regs->psr & PSR_PS);
-        int fault, code;
+        int code;
+        vm_fault_t fault;
         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
         if (text_fault)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 41363f46797b..8f8a604c1300 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -170,11 +170,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                              int fault_code)
 {
         unsigned long addr;
-        siginfo_t info;
 
-        info.si_code = code;
-        info.si_signo = sig;
-        info.si_errno = 0;
         if (fault_code & FAULT_CODE_ITLB) {
                 addr = regs->tpc;
         } else {
@@ -187,13 +183,11 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                 else
                         addr = fault_addr;
         }
-        info.si_addr = (void __user *) addr;
-        info.si_trapno = 0;
 
         if (unlikely(show_unhandled_signals))
                 show_signal_msg(regs, sig, code, addr, current);
 
-        force_sig_info(sig, &info, current);
+        force_sig_fault(sig, code, (void __user *) addr, 0, current);
 }
 
 static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
@@ -284,7 +278,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
         unsigned int insn = 0;
-        int si_code, fault_code, fault;
+        int si_code, fault_code;
+        vm_fault_t fault;
         unsigned long address, mm_rss;
         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 5335ba3c850e..aee6dba83d0e 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 #include <linux/rwsem.h>
 #include <asm/pgtable.h>
+#include <asm/adi.h>
 
 /*
  * The performance critical leaf functions are made noinline otherwise gcc
@@ -192,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
         return 1;
 }
 
+/*
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                           struct page **pages)
 {
@@ -201,6 +206,24 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
         pgd_t *pgdp;
         int nr = 0;
 
+#ifdef CONFIG_SPARC64
+        if (adi_capable()) {
+                long addr = start;
+
+                /* If userspace has passed a versioned address, kernel
+                 * will not find it in the VMAs since it does not store
+                 * the version tags in the list of VMAs. Storing version
+                 * tags in list of VMAs is impractical since they can be
+                 * changed any time from userspace without dropping into
+                 * kernel. Any address search in VMAs will be done with
+                 * non-versioned addresses. Ensure the ADI version bits
+                 * are dropped here by sign extending the last bit before
+                 * ADI bits. IOMMU does not implement version tags.
+                 */
+                addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
+                start = addr;
+        }
+#endif
         start &= PAGE_MASK;
         addr = start;
         len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -231,6 +254,24 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
         pgd_t *pgdp;
         int nr = 0;
 
+#ifdef CONFIG_SPARC64
+        if (adi_capable()) {
+                long addr = start;
+
+                /* If userspace has passed a versioned address, kernel
+                 * will not find it in the VMAs since it does not store
+                 * the version tags in the list of VMAs. Storing version
+                 * tags in list of VMAs is impractical since they can be
+                 * changed any time from userspace without dropping into
+                 * kernel. Any address search in VMAs will be done with
+                 * non-versioned addresses. Ensure the ADI version bits
+                 * are dropped here by sign extending the last bit before
+                 * ADI bits. IOMMU does not implement version tags.
+                 */
+                addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
+                start = addr;
+        }
+#endif
         start &= PAGE_MASK;
         addr = start;
         len = (unsigned long) nr_pages << PAGE_SHIFT;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 0112d6942288..f78793a06bbd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -182,8 +182,20 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                          struct page *page, int writeable)
 {
         unsigned int shift = huge_page_shift(hstate_vma(vma));
+        pte_t pte;
 
-        return hugepage_shift_to_tte(entry, shift);
+        pte = hugepage_shift_to_tte(entry, shift);
+
+#ifdef CONFIG_SPARC64
+        /* If this vma has ADI enabled on it, turn on TTE.mcd
+         */
+        if (vma->vm_flags & VM_SPARC_ADI)
+                return pte_mkmcd(pte);
+        else
+                return pte_mknotmcd(pte);
+#else
+        return pte;
+#endif
 }
 
 static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 95fe4f081ba3..d900952bfc5f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -22,7 +22,7 @@
 #include <linux/initrd.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pagemap.h>
 #include <linux/poison.h>
 #include <linux/gfp.h>
@@ -101,13 +101,46 @@ static unsigned long calc_max_low_pfn(void)
         return tmp;
 }
 
+static void __init find_ramdisk(unsigned long end_of_phys_memory)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+        unsigned long size;
+
+        /* Now have to check initial ramdisk, so that it won't pass
+         * the end of memory
+         */
+        if (sparc_ramdisk_image) {
+                if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+                        sparc_ramdisk_image -= KERNBASE;
+                initrd_start = sparc_ramdisk_image + phys_base;
+                initrd_end = initrd_start + sparc_ramdisk_size;
+                if (initrd_end > end_of_phys_memory) {
+                        printk(KERN_CRIT "initrd extends beyond end of memory "
+                               "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+                               initrd_end, end_of_phys_memory);
+                        initrd_start = 0;
+                } else {
+                        /* Reserve the initrd image area. */
+                        size = initrd_end - initrd_start;
+                        memblock_reserve(initrd_start, size);
+
+                        initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+                        initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
+                }
+        }
+#endif
+}
+
 unsigned long __init bootmem_init(unsigned long *pages_avail)
 {
-        unsigned long bootmap_size, start_pfn;
-        unsigned long end_of_phys_memory = 0UL;
-        unsigned long bootmap_pfn, bytes_avail, size;
+        unsigned long start_pfn, bytes_avail, size;
+        unsigned long end_of_phys_memory = 0;
+        unsigned long high_pages = 0;
         int i;
 
+        memblock_set_bottom_up(true);
+        memblock_allow_resize();
+
         bytes_avail = 0UL;
         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                 end_of_phys_memory = sp_banks[i].base_addr +
@@ -124,24 +157,25 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                                 if (sp_banks[i].num_bytes == 0) {
                                         sp_banks[i].base_addr = 0xdeadbeef;
                                 } else {
+                                        memblock_add(sp_banks[i].base_addr,
+                                                     sp_banks[i].num_bytes);
                                         sp_banks[i+1].num_bytes = 0;
                                         sp_banks[i+1].base_addr = 0xdeadbeef;
                                 }
                                 break;
                         }
                 }
+                memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes);
         }
 
         /* Start with page aligned address of last symbol in kernel
-         * image.	
+         * image.
          */
         start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
 
         /* Now shift down to get the real physical page frame number. */
         start_pfn >>= PAGE_SHIFT;
 
-        bootmap_pfn = start_pfn;
-
         max_pfn = end_of_phys_memory >> PAGE_SHIFT;
 
         max_low_pfn = max_pfn;
@@ -150,85 +184,19 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
         if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
                 highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
                 max_low_pfn = calc_max_low_pfn();
+                high_pages = calc_highpages();
                 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-                       calc_highpages() >> (20 - PAGE_SHIFT));
+                       high_pages >> (20 - PAGE_SHIFT));
         }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-        /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
-        if (sparc_ramdisk_image) {
-                if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
-                        sparc_ramdisk_image -= KERNBASE;
-                initrd_start = sparc_ramdisk_image + phys_base;
-                initrd_end = initrd_start + sparc_ramdisk_size;
-                if (initrd_end > end_of_phys_memory) {
-                        printk(KERN_CRIT "initrd extends beyond end of memory "
-                               "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
-                               initrd_end, end_of_phys_memory);
-                        initrd_start = 0;
-                }
-                if (initrd_start) {
-                        if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
-                            initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
-                                bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
-                }
-        }
-#endif
-        /* Initialize the boot-time allocator. */
-        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
-                                         max_low_pfn);
-
-        /* Now register the available physical memory with the
-         * allocator.
-         */
-        *pages_avail = 0;
-        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-                unsigned long curr_pfn, last_pfn;
+        find_ramdisk(end_of_phys_memory);
 
-                curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
-                if (curr_pfn >= max_low_pfn)
-                        break;
-
-                last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-                if (last_pfn > max_low_pfn)
-                        last_pfn = max_low_pfn;
-
-                /*
-                 * .. finally, did all the rounding and playing
-                 * around just make the area go away?
-                 */
-                if (last_pfn <= curr_pfn)
-                        continue;
-
-                size = (last_pfn - curr_pfn) << PAGE_SHIFT;
-                *pages_avail += last_pfn - curr_pfn;
-
-                free_bootmem(sp_banks[i].base_addr, size);
-        }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-        if (initrd_start) {
-                /* Reserve the initrd image area. */
-                size = initrd_end - initrd_start;
-                reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
-                *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-                initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
-                initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
-        }
-#endif
         /* Reserve the kernel text/data/bss. */
         size = (start_pfn << PAGE_SHIFT) - phys_base;
-        reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
-        *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+        memblock_reserve(phys_base, size);
 
-        /* Reserve the bootmem map. We do not account for it
-         * in pages_avail because we will release that memory
-         * in free_all_bootmem.
-         */
-        size = bootmap_size;
-        reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
-        *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+        size = memblock_phys_mem_size() - memblock_reserved_size();
+        *pages_avail = (size >> PAGE_SHIFT) - high_pages;
 
         return max_pfn;
 }
@@ -296,7 +264,7 @@ void __init mem_init(void)
         i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
         i += 1;
         sparc_valid_addr_bitmap = (unsigned long *)
-                __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+                memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
 
         if (sparc_valid_addr_bitmap == NULL) {
                 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
@@ -308,7 +276,7 @@ void __init mem_init(void)
         max_mapnr = last_valid_pfn - pfn_base;
         high_memory = __va(max_low_pfn << PAGE_SHIFT);
 
-        free_all_bootmem();
+        memblock_free_all();
 
         for (i = 0; sp_banks[i].num_bytes != 0; i++) {
                 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
@@ -322,7 +290,7 @@ void __init mem_init(void)
                 map_high_region(start_pfn, end_pfn);
         }
 
-	
+
         mem_init_print_info(NULL);
 }
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 995f9490334d..3c8aac21f426 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -11,7 +11,7 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/initrd.h>
@@ -25,7 +25,6 @@
 #include <linux/sort.h>
 #include <linux/ioport.h>
 #include <linux/percpu.h>
-#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/gfp.h>
 
@@ -206,9 +205,9 @@ inline void flush_dcache_page_impl(struct page *page)
 #ifdef DCACHE_ALIASING_POSSIBLE
         __flush_dcache_page(page_address(page),
                             ((tlb_type == spitfire) &&
-                             page_mapping(page) != NULL));
+                             page_mapping_file(page) != NULL));
 #else
-        if (page_mapping(page) != NULL &&
+        if (page_mapping_file(page) != NULL &&
             tlb_type == spitfire)
                 __flush_icache_page(__pa(page_address(page)));
 #endif
@@ -490,7 +489,7 @@ void flush_dcache_page(struct page *page)
 
         this_cpu = get_cpu();
 
-        mapping = page_mapping(page);
+        mapping = page_mapping_file(page);
         if (mapping && !mapping_mapped(mapping)) {
                 int dirty = test_bit(PG_dcache_dirty, &page->flags);
                 if (dirty) {
@@ -1092,7 +1091,8 @@ static void __init allocate_node_data(int nid)
 #ifdef CONFIG_NEED_MULTIPLE_NODES
         unsigned long paddr;
 
-        paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
+        paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data),
+                                            SMP_CACHE_BYTES, nid);
         if (!paddr) {
                 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
                 prom_halt();
@@ -1266,8 +1266,8 @@ static int __init grab_mlgroups(struct mdesc_handle *md)
         if (!count)
                 return -ENOENT;
 
-        paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
-                               SMP_CACHE_BYTES);
+        paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
+                                    SMP_CACHE_BYTES);
         if (!paddr)
                 return -ENOMEM;
 
@@ -1307,8 +1307,8 @@ static int __init grab_mblocks(struct mdesc_handle *md)
         if (!count)
                 return -ENOENT;
 
-        paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
-                               SMP_CACHE_BYTES);
+        paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
+                                    SMP_CACHE_BYTES);
         if (!paddr)
                 return -ENOMEM;
 
@@ -1383,6 +1383,7 @@ int __node_distance(int from, int to)
         }
         return numa_latency[from][to];
 }
+EXPORT_SYMBOL(__node_distance);
 
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
@@ -1620,7 +1621,7 @@ static void __init bootmem_init_nonnuma(void)
                (top_of_ram - total_ram) >> 20);
 
         init_node_masks_nonnuma();
-        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
+        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
         allocate_node_data(0);
         node_set_online(0);
 }
@@ -1809,7 +1810,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
                 if (pgd_none(*pgd)) {
                         pud_t *new;
 
-                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                        new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+                                                  PAGE_SIZE);
                         alloc_bytes += PAGE_SIZE;
                         pgd_populate(&init_mm, pgd, new);
                 }
@@ -1821,7 +1823,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
                                 vstart = kernel_map_hugepud(vstart, vend, pud);
                                 continue;
                         }
-                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                        new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+                                                  PAGE_SIZE);
                         alloc_bytes += PAGE_SIZE;
                         pud_populate(&init_mm, pud, new);
                 }
@@ -1834,7 +1837,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
                                 vstart = kernel_map_hugepmd(vstart, vend, pmd);
                                 continue;
                         }
-                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                        new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+                                                  PAGE_SIZE);
                         alloc_bytes += PAGE_SIZE;
                         pmd_populate_kernel(&init_mm, pmd, new);
                 }
@@ -2540,12 +2544,12 @@ void __init mem_init(void)
 {
         high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-        free_all_bootmem();
+        memblock_free_all();
 
         /*
          * Must be done after boot memory is put on freelist, because here we
          * might set fields in deferred struct pages that have not yet been
-         * initialized, and free_all_bootmem() initializes all the reserved
+         * initialized, and memblock_free_all() initializes all the reserved
          * deferred pages for us.
          */
         register_page_bootmem_info();
@@ -3160,3 +3164,72 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                 do_flush_tlb_kernel_range(start, end);
         }
 }
+
+void copy_user_highpage(struct page *to, struct page *from,
+        unsigned long vaddr, struct vm_area_struct *vma)
+{
+        char *vfrom, *vto;
+
+        vfrom = kmap_atomic(from);
+        vto = kmap_atomic(to);
+        copy_user_page(vto, vfrom, vaddr, to);
+        kunmap_atomic(vto);
+        kunmap_atomic(vfrom);
+
+        /* If this page has ADI enabled, copy over any ADI tags
+         * as well
+         */
+        if (vma->vm_flags & VM_SPARC_ADI) {
+                unsigned long pfrom, pto, i, adi_tag;
+
+                pfrom = page_to_phys(from);
+                pto = page_to_phys(to);
+
+                for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+                        asm volatile("ldxa [%1] %2, %0\n\t"
+                                        : "=r" (adi_tag)
+                                        :  "r" (i), "i" (ASI_MCD_REAL));
+                        asm volatile("stxa %0, [%1] %2\n\t"
+                                        :
+                                        : "r" (adi_tag), "r" (pto),
+                                          "i" (ASI_MCD_REAL));
+                        pto += adi_blksize();
+                }
+                asm volatile("membar #Sync\n\t");
+        }
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void copy_highpage(struct page *to, struct page *from)
+{
+        char *vfrom, *vto;
+
+        vfrom = kmap_atomic(from);
+        vto = kmap_atomic(to);
+        copy_page(vto, vfrom);
+        kunmap_atomic(vto);
+        kunmap_atomic(vfrom);
+
+        /* If this platform is ADI enabled, copy any ADI tags
+         * as well
+         */
+        if (adi_capable()) {
+                unsigned long pfrom, pto, i, adi_tag;
+
+                pfrom = page_to_phys(from);
+                pto = page_to_phys(to);
+
+                for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+                        asm volatile("ldxa [%1] %2, %0\n\t"
+                                        : "=r" (adi_tag)
+                                        :  "r" (i), "i" (ASI_MCD_REAL));
+                        asm volatile("stxa %0, [%1] %2\n\t"
+                                        :
+                                        : "r" (adi_tag), "r" (pto),
+                                          "i" (ASI_MCD_REAL));
+                        pto += adi_blksize();
+                }
+                asm volatile("membar #Sync\n\t");
+        }
+}
+EXPORT_SYMBOL(copy_highpage);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 1d70c3f6d986..a6142c5abf61 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -11,7 +11,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/kdebug.h>
@@ -37,7 +37,6 @@
 #include <asm/mbus.h>
 #include <asm/page.h>
 #include <asm/asi.h>
-#include <asm/msi.h>
 #include <asm/smp.h>
 #include <asm/io.h>
 
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
         set_pte((pte_t *)ctxp, pte);
 }
 
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN  0xe0001008      /* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE  0x80000000      /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+        __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+                              "andn %%g3, %2, %%g3\n\t"
+                              "sta %%g3, [%0] %1\n\t" : :
+                              "r" (MSI_MBUS_ARBEN),
+                              "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
         unsigned long ptp;      /* Physical address, shifted right by 4 */
@@ -285,13 +303,13 @@ static void __init srmmu_nocache_init(void)
 
         bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 
-        srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
-                SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+        srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
+                                                 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
         memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
         srmmu_nocache_bitmap =
-                __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
-                                SMP_CACHE_BYTES, 0UL);
+                memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+                                    SMP_CACHE_BYTES, 0UL);
         bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
         srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -449,7 +467,7 @@ static void __init sparc_context_init(int numctx)
         unsigned long size;
 
         size = numctx * sizeof(struct ctx_list);
-        ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+        ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
 
         for (ctx = 0; ctx < numctx; ctx++) {
                 struct ctx_list *clist;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b5cfab711651..3d72d2deb13b 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                 goto no_cache_flush;
 
         /* A real file page? */
-        mapping = page_mapping(page);
+        mapping = page_mapping_file(page);
         if (!mapping)
                 goto no_cache_flush;
 
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 75a04c1a2383..f5edc28aa3a5 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -546,6 +546,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
         mm->context.sparc64_ctx_val = 0UL;
 
+        mm->context.tag_store = NULL;
+        spin_lock_init(&mm->context.tag_lock);
+
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
         /* We reset them to zero because the fork() page copying
          * will re-increment the counters as the parent PTEs are
@@ -611,4 +614,22 @@ void destroy_context(struct mm_struct *mm)
         }
 
         spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+
+        /* If ADI tag storage was allocated for this task, free it */
+        if (mm->context.tag_store) {
+                tag_storage_desc_t *tag_desc;
+                unsigned long max_desc;
+                unsigned char *tags;
+
+                tag_desc = mm->context.tag_store;
+                max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+                for (i = 0; i < max_desc; i++) {
+                        tags = tag_desc->tags;
+                        tag_desc->tags = NULL;
+                        kfree(tags);
+                        tag_desc++;
+                }
+                kfree(mm->context.tag_store);
+                mm->context.tag_store = NULL;
+        }
 }
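A note on the ADI address fix-up added to gup.c above: the expression
(addr << adi_nbits()) >> adi_nbits() drops the version tag by shifting it off
the top of the address and then arithmetic-shifting back, which replicates the
last bit below the ADI field through the tag bits -- the "sign extending the
last bit before ADI bits" the comment describes. A minimal standalone sketch
of that arithmetic, not part of the patch, assuming a hypothetical fixed 4-bit
version field in bits 63:60 (the kernel obtains the real width from
adi_nbits(), and, like the code above, relies on gcc's arithmetic-shift
semantics for signed longs):

    /* Illustration only; mirrors the patch's shift trick under the
     * assumption of a fixed 4-bit tag and arithmetic right shift.
     */
    #include <stdio.h>

    #define ADI_NBITS 4   /* assumed tag width, not a kernel constant */

    static long adi_untag(long addr)
    {
            /* Shift the tag out, then shift back so bit 59 is
             * replicated through bits 63:60, yielding the untagged
             * address that VMA lookups expect.
             */
            return (addr << ADI_NBITS) >> ADI_NBITS;
    }

    int main(void)
    {
            long tagged = 0x3000000010a0L | (0x5L << 60);  /* version 5 */

            printf("tagged:   0x%016lx\n", tagged);
            printf("untagged: 0x%016lx\n", adi_untag(tagged));
            return 0;
    }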