From f467e4bfb50ca6af042f1b19b3556bd4aca854c3 Mon Sep 17 00:00:00 2001
From: Hillf Danton
Date: Wed, 11 Jan 2012 15:37:13 +0100
Subject: MIPS: Flush huge TLB

When flushing a TLB range, check whether @vma is backed by huge pages.
If it is, walk the range in huge-page steps and flush the corresponding
huge TLB entries, since huge pages are mapped with a different page size
than normal pages.

Signed-off-by: Hillf Danton
Acked-by: David Daney
Cc: linux-mips@linux-mips.org
Cc: "Jayachandran C."
Patchwork: https://patchwork.linux-mips.org/patch/2825/
Signed-off-by: David Daney
Acked-by: Hillf Danton
Patchwork: https://patchwork.linux-mips.org/patch/3114/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlb-r4k.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0d394e0e8837..88dc49cfa163 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -120,22 +120,30 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,

 	if (cpu_context(cpu, mm) != 0) {
 		unsigned long size, flags;
+		int huge = is_vm_hugetlb_page(vma);

 		ENTER_CRITICAL(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		size = (size + 1) >> 1;
+		if (huge) {
+			start = round_down(start, HPAGE_SIZE);
+			end = round_up(end, HPAGE_SIZE);
+			size = (end - start) >> HPAGE_SHIFT;
+		} else {
+			start = round_down(start, PAGE_SIZE << 1);
+			end = round_up(end, PAGE_SIZE << 1);
+			size = (end - start) >> (PAGE_SHIFT + 1);
+		}
 		if (size <= current_cpu_data.tlbsize/2) {
 			int oldpid = read_c0_entryhi();
 			int newpid = cpu_asid(cpu, mm);

-			start &= (PAGE_MASK << 1);
-			end += ((PAGE_SIZE << 1) - 1);
-			end &= (PAGE_MASK << 1);
 			while (start < end) {
 				int idx;

 				write_c0_entryhi(start | newpid);
-				start += (PAGE_SIZE << 1);
+				if (huge)
+					start += HPAGE_SIZE;
+				else
+					start += (PAGE_SIZE << 1);
 				mtc0_tlbw_hazard();
 				tlb_probe();
 				tlb_probe_hazard();
--
cgit v1.2.1
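To make the size calculation above concrete, here is a minimal user-space C
sketch of the range-sizing arithmetic the patch introduces. It assumes 4 KiB
base pages and 2 MiB huge pages purely for illustration (the real PAGE_SHIFT
and HPAGE_SHIFT come from the kernel configuration), and the simplified
round_down()/round_up() macros and the flush_entries() helper are hypothetical
stand-ins rather than kernel code:

/*
 * Illustrative only: a user-space sketch of the range-sizing logic the
 * patch adds to local_flush_tlb_range().  Page sizes are assumed values
 * (4 KiB base pages, 2 MiB huge pages); the real HPAGE_SIZE/HPAGE_SHIFT
 * come from the kernel configuration.
 */
#include <stdio.h>

#define PAGE_SHIFT      12UL
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define HPAGE_SHIFT     21UL                    /* assumed 2 MiB huge pages */
#define HPAGE_SIZE      (1UL << HPAGE_SHIFT)

/* Simplified round_down()/round_up() for power-of-two sizes */
#define round_down(x, y)        ((x) & ~((y) - 1))
#define round_up(x, y)          round_down((x) + (y) - 1, (y))

static unsigned long flush_entries(unsigned long start, unsigned long end,
                                   int huge)
{
        if (huge) {
                /* Align to huge-page boundaries, one TLB entry per huge page */
                start = round_down(start, HPAGE_SIZE);
                end = round_up(end, HPAGE_SIZE);
                return (end - start) >> HPAGE_SHIFT;
        }
        /* R4000-style TLBs map an even/odd base-page pair per entry */
        start = round_down(start, PAGE_SIZE << 1);
        end = round_up(end, PAGE_SIZE << 1);
        return (end - start) >> (PAGE_SHIFT + 1);
}

int main(void)
{
        unsigned long start = 0x400000, end = 0x40c000; /* 48 KiB range */

        printf("base pages: %lu entries\n", flush_entries(start, end, 0));
        printf("huge pages: %lu entries\n", flush_entries(start, end, 1));
        return 0;
}

With these assumed sizes, the 48 KiB range costs six even/odd entry-pair
probes on the base-page path but only one probe on the huge-page path, which
is why the patch rounds and steps the range differently in the two cases.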
From d7a887a73dec6c387b02a966a71aac767bbd9ce6 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 11 Jan 2012 15:37:16 +0100
Subject: MIPS: Delete unused function add_temporary_entry.

It was only available for R4000-style TLBs anyway, and proper ordering of
the initialization code has made this crude interface unnecessary.

Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlb-r4k.c | 47 -----------------------------------------------
 1 file changed, 47 deletions(-)

diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 88dc49cfa163..74d67c885743 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -376,51 +376,6 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	EXIT_CRITICAL(flags);
 }

-/*
- * Used for loading TLB entries before trap_init() has started, when we
- * don't actually want to add a wired entry which remains throughout the
- * lifetime of the system
- */
-
-static int temp_tlb_entry __cpuinitdata;
-
-__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
-			       unsigned long entryhi, unsigned long pagemask)
-{
-	int ret = 0;
-	unsigned long flags;
-	unsigned long wired;
-	unsigned long old_pagemask;
-	unsigned long old_ctx;
-
-	ENTER_CRITICAL(flags);
-	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-	wired = read_c0_wired();
-	if (--temp_tlb_entry < wired) {
-		printk(KERN_WARNING
-		       "No TLB space left for add_temporary_entry\n");
-		ret = -ENOSPC;
-		goto out;
-	}
-
-	write_c0_index(temp_tlb_entry);
-	write_c0_pagemask(pagemask);
-	write_c0_entryhi(entryhi);
-	write_c0_entrylo0(entrylo0);
-	write_c0_entrylo1(entrylo1);
-	mtc0_tlbw_hazard();
-	tlb_write_indexed();
-	tlbw_use_hazard();
-
-	write_c0_entryhi(old_ctx);
-	write_c0_pagemask(old_pagemask);
-out:
-	EXIT_CRITICAL(flags);
-	return ret;
-}
-
 static int __cpuinitdata ntlb;
 static int __init set_ntlb(char *str)
 {
@@ -458,8 +413,6 @@ void __cpuinit tlb_init(void)
 		write_c0_pagegrain(pg);
 	}

-	temp_tlb_entry = current_cpu_data.tlbsize - 1;
-
 	/* From this point on the ARC firmware is dead.  */
 	local_flush_tlb_all();
--
cgit v1.2.1
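For context, the deleted helper performed the same indexed TLB write sequence
that the surviving add_wired_entry() still uses. The fragment below is an
illustrative kernel-context sketch of that sequence only, assuming the CP0
accessors from <asm/mipsregs.h> and the hazard barriers from <asm/hazards.h>;
the function name write_one_tlb_entry() is hypothetical, and the locking and
wired-slot bookkeeping of the original are deliberately omitted:

/*
 * Illustrative sketch only (kernel context): the indexed TLB write
 * sequence used by the deleted add_temporary_entry() and still used by
 * add_wired_entry().  Interrupt and wired-entry handling are omitted.
 */
#include <asm/mipsregs.h>
#include <asm/hazards.h>

static void write_one_tlb_entry(int idx, unsigned long entryhi,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long pagemask)
{
	unsigned long old_ctx = read_c0_entryhi();
	unsigned long old_pagemask = read_c0_pagemask();

	write_c0_index(idx);			/* select the TLB slot */
	write_c0_pagemask(pagemask);		/* page size of this entry */
	write_c0_entryhi(entryhi);		/* VPN2 | ASID */
	write_c0_entrylo0(entrylo0);		/* even page PFN and flags */
	write_c0_entrylo1(entrylo1);		/* odd page PFN and flags */
	mtc0_tlbw_hazard();
	tlb_write_indexed();			/* commit to the selected slot */
	tlbw_use_hazard();

	/* Restore the previous EntryHi/PageMask */
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
}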