Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                       |   1
-rw-r--r--  arch/mips/Makefile                      |   2
-rw-r--r--  arch/mips/boot/compressed/Makefile      |   2
-rw-r--r--  arch/mips/include/asm/asm-prototypes.h  |   1
-rw-r--r--  arch/mips/include/asm/atomic.h          |   4
-rw-r--r--  arch/mips/include/asm/compiler.h        |  35
-rw-r--r--  arch/mips/include/asm/kvm_host.h        |   1
-rw-r--r--  arch/mips/include/asm/mmu_context.h     |   1
-rw-r--r--  arch/mips/include/asm/tlbex.h           |   9
-rw-r--r--  arch/mips/kernel/traps.c                |   4
-rw-r--r--  arch/mips/kernel/uprobes.c              |   2
-rw-r--r--  arch/mips/kernel/vdso.c                 |  20
-rw-r--r--  arch/mips/kvm/mmu.c                     |  10
-rw-r--r--  arch/mips/lasat/image/Makefile          |   2
-rw-r--r--  arch/mips/lib/multi3.c                  |   6
-rw-r--r--  arch/mips/mm/tlb-funcs.S                |   3
-rw-r--r--  arch/mips/mm/tlbex.c                    | 101
17 files changed, 120 insertions(+), 84 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2af13b162e5e..35511999156a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -33,6 +33,7 @@ config MIPS
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 5425df002a6b..d74b3742fa5d 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -309,7 +309,7 @@ endif
# instead of .eh_frame so we don't discard them.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-LDFLAGS += -m $(ld-emul)
+KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index abe77add8789..3c453a1f1ff1 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -92,7 +92,7 @@ UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)
vmlinuzobjs-y += $(obj)/piggy.o
quiet_cmd_zld = LD $@
- cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
+ cmd_zld = $(LD) $(KBUILD_LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -s $@
vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
diff --git a/arch/mips/include/asm/asm-prototypes.h b/arch/mips/include/asm/asm-prototypes.h
index 576f1a62dea9..f901ed043c71 100644
--- a/arch/mips/include/asm/asm-prototypes.h
+++ b/arch/mips/include/asm/asm-prototypes.h
@@ -5,3 +5,4 @@
#include <asm-generic/asm-prototypes.h>
#include <linux/uaccess.h>
#include <asm/ftrace.h>
+#include <asm/mmu_context.h>
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 0269b3de8b51..d4ea7a5b60cf 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -122,8 +122,8 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \
- " move %0, %1 \n" \
" .set mips0 \n" \
+ " move %0, %1 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
@@ -190,9 +190,11 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
__asm__ __volatile__(
" .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
+ " .set mips0 \n"
" subu %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 1f \n"
+ " .set "MIPS_ISA_LEVEL" \n"
" sc %1, %2 \n"
"\t" __scbeqz " %1, 1b \n"
"1: \n"
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index e081a265f422..cc2eb1b06050 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -8,6 +8,41 @@
#ifndef _ASM_COMPILER_H
#define _ASM_COMPILER_H
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ * https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
#define GCC_IMM_ASM() "n"
#define GCC_REG_ACCUM "$0"
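[Note: the comment block above is the heart of this change. Concretely, the failure mode it describes is a switch case whose only content is an unreachable annotation. A hedged illustration follows; do_zero_case/do_one_case are hypothetical helpers, and in-kernel code would normally reach this macro via unreachable(), which invokes barrier_before_unreachable() ahead of __builtin_unreachable().]

int do_zero_case(void);	/* hypothetical helpers for illustration */
int do_one_case(void);

int classify(int op)
{
	switch (op) {
	case 0:
		return do_zero_case();
	case 1:
		return do_one_case();
	default:
		/*
		 * Without the barrier, affected GCC versions could pull
		 * instructions from beyond this point into earlier delay
		 * slots, because this case body is otherwise empty. The
		 * volatile ".insn" asm cannot be reordered past, and marks
		 * the location as code for microMIPS link purposes.
		 */
		barrier_before_unreachable();
		__builtin_unreachable();
	}
}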
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a9af1d2dcd69..2c1c53d12179 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
bool write);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index b509371a6b0c..94414561de0e 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -32,6 +32,7 @@ do { \
} while (0)
extern void tlbmiss_handler_setup_pgd(unsigned long);
+extern char tlbmiss_handler_setup_pgd_end[];
/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
index 4ed7ebed3bf1..6d97e23f30ab 100644
--- a/arch/mips/include/asm/tlbex.h
+++ b/arch/mips/include/asm/tlbex.h
@@ -24,4 +24,13 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
enum tlb_write_entry wmode);
+extern void handle_tlbl(void);
+extern char handle_tlbl_end[];
+
+extern void handle_tlbs(void);
+extern char handle_tlbs_end[];
+
+extern void handle_tlbm(void);
+extern char handle_tlbm_end[];
+
#endif /* __ASM_TLBEX_H */
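[Note: a sketch of how these paired symbols are meant to be used. Declaring the end markers as char arrays lets plain pointer arithmetic yield a size in bytes, which is exactly the computation the tlbex.c hunks below switch to; handle_tlbl_bytes is a hypothetical helper for illustration.]

#include <asm/tlbex.h>

static unsigned long handle_tlbl_bytes(void)
{
	/* handle_tlbl is a function symbol; cast it for byte arithmetic. */
	return handle_tlbl_end - (char *)handle_tlbl;
}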
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f8871d5b7eb3..9dab0ed1b227 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -67,14 +67,12 @@
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
+#include <asm/tlbex.h>
#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
-extern u32 handle_tlbl[];
-extern u32 handle_tlbs[];
-extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index f7a0645ccb82..4aaff3b3175c 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -224,7 +224,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long vaddr)
{
- return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+ return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -20,6 +21,7 @@
#include <asm/abi.h>
#include <asm/mips-cps.h>
+#include <asm/page.h>
#include <asm/vdso.h>
/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;
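[Note: a worked sketch of the colour-matching step above. The real shm_align_mask is probed at boot; 0x7fff (a 32 KiB aliasing window) is only an assumption for this example, and colour_match is a hypothetical name. This arithmetic is also why the hunk adds #include <linux/kernel.h>, which provides __ALIGN_MASK().]

unsigned long colour_match(unsigned long base, unsigned long gic_size,
			   unsigned long kaddr /* kernel address of vdso_data */)
{
	const unsigned long shm_align_mask = 0x7fff;	/* assumed value */

	/* __ALIGN_MASK(x, m) from <linux/kernel.h> is ((x) + (m)) & ~(m). */
	base = (base + shm_align_mask) & ~shm_align_mask;
	/*
	 * data_addr will be base + gic_size; offset base so that address
	 * shares a cache colour with kaddr, the kernel's own mapping of
	 * the VDSO data page.
	 */
	base += (kaddr - gic_size) & shm_align_mask;
	return base;
}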
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index ee64db032793..d8dcdb350405 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/mips/lasat/image/Makefile b/arch/mips/lasat/image/Makefile
index 9ab1326f57c9..78ce4cff1012 100644
--- a/arch/mips/lasat/image/Makefile
+++ b/arch/mips/lasat/image/Makefile
@@ -38,7 +38,7 @@ $(obj)/rom.bin: $(obj)/rom
# Rule to make the bootloader
$(obj)/rom: $(addprefix $(obj)/,$(OBJECTS))
- $(LD) $(LDFLAGS) $(LDSCRIPT) -o $@ $^
+ $(LD) $(KBUILD_LDFLAGS) $(LDSCRIPT) -o $@ $^
$(obj)/%.o: $(obj)/%.gz
$(LD) -r -o $@ -b binary $<
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
index 111ad475aa0c..4c2483f410c2 100644
--- a/arch/mips/lib/multi3.c
+++ b/arch/mips/lib/multi3.c
@@ -4,12 +4,12 @@
#include "libgcc.h"
/*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
*/
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
/* multiply 64-bit values, low 64-bits returned */
static inline long long notrace dmulu(long long a, long long b)
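[Note: for readers unfamiliar with the intrinsic, __multi3 is the libgcc routine for a 128 x 128 -> low-128-bit multiply. A sketch of the arithmetic it performs, built from 64-bit partial products, is below; multi3_sketch is a hypothetical name, and the kernel file realises the same computation with the R6 dmulu instruction shown above plus its high-half counterpart.]

typedef unsigned __int128 u128;

static u128 multi3_sketch(u128 a, u128 b)
{
	unsigned long long alo = (unsigned long long)a, ahi = a >> 64;
	unsigned long long blo = (unsigned long long)b, bhi = b >> 64;
	u128 res = (u128)alo * blo;	/* full low x low product */

	/* The cross terms only affect the high 64 bits of the result. */
	res += (u128)(alo * bhi + ahi * blo) << 64;
	return res;
}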
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index a5427c6e9757..00fef578c8cd 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -12,16 +12,17 @@
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
*/
#include <asm/asm.h>
+#include <asm/export.h>
#include <asm/regdef.h>
#define FASTPATH_SIZE 128
-EXPORT(tlbmiss_handler_setup_pgd_start)
LEAF(tlbmiss_handler_setup_pgd)
1: j 1b /* Dummy, will be replaced. */
.space 64
END(tlbmiss_handler_setup_pgd)
EXPORT(tlbmiss_handler_setup_pgd_end)
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd)
LEAF(handle_tlbm)
.space FASTPATH_SIZE * 4
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 49312a14cd17..067714291643 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -31,6 +31,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
+#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
@@ -253,8 +254,10 @@ static void output_pgtable_bits_defines(void)
pr_debug("\n");
}
-static inline void dump_handler(const char *symbol, const u32 *handler, int count)
+static inline void dump_handler(const char *symbol, const void *start, const void *end)
{
+ unsigned int count = (end - start) / sizeof(u32);
+ const u32 *handler = start;
int i;
pr_debug("LEAF(%s)\n", symbol);
@@ -402,12 +405,6 @@ static void build_restore_work_registers(u32 **p)
* CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
* we cannot do r3000 under these circumstances.
*
- * Declare pgd_current here instead of including mmu_context.h to avoid type
- * conflicts for tlbmiss_handler_setup_pgd
- */
-extern unsigned long pgd_current[];
-
-/*
* The R3000 TLB handler is simple.
*/
static void build_r3000_tlb_refill_handler(void)
@@ -444,8 +441,7 @@ static void build_r3000_tlb_refill_handler(void)
memcpy((void *)ebase, tlb_handler, 0x80);
local_flush_icache_range(ebase, ebase + 0x80);
-
- dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
+ dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -1465,8 +1461,7 @@ static void build_r4000_tlb_refill_handler(void)
memcpy((void *)ebase, final_handler, 0x100);
local_flush_icache_range(ebase, ebase + 0x100);
-
- dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
+ dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
}
static void setup_pw(void)
@@ -1568,31 +1563,21 @@ static void build_loongson3_tlb_refill_handler(void)
uasm_resolve_relocs(relocs, labels);
memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
local_flush_icache_range(ebase + 0x80, ebase + 0x100);
- dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
+ dump_handler("loongson3_tlb_refill",
+ (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
}
-extern u32 handle_tlbl[], handle_tlbl_end[];
-extern u32 handle_tlbs[], handle_tlbs_end[];
-extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[];
-extern u32 tlbmiss_handler_setup_pgd[];
-EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
-extern u32 tlbmiss_handler_setup_pgd_end[];
-
static void build_setup_pgd(void)
{
const int a0 = 4;
const int __maybe_unused a1 = 5;
const int __maybe_unused a2 = 6;
- u32 *p = tlbmiss_handler_setup_pgd_start;
- const int tlbmiss_handler_setup_pgd_size =
- tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
+ u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
#endif
- memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
- sizeof(tlbmiss_handler_setup_pgd[0]));
+ memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
pgd_reg = allocate_kscratch();
@@ -1645,15 +1630,15 @@ static void build_setup_pgd(void)
else
uasm_i_nop(&p);
#endif
- if (p >= tlbmiss_handler_setup_pgd_end)
+ if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
- (unsigned int)(p - tlbmiss_handler_setup_pgd));
+ (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
- tlbmiss_handler_setup_pgd_size);
+ tlbmiss_handler_setup_pgd_end);
}
static void
@@ -1922,12 +1907,11 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
static void build_r3000_tlb_load_handler(void)
{
- u32 *p = handle_tlbl;
- const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
+ u32 *p = (u32 *)handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
+ memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1941,24 +1925,23 @@ static void build_r3000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbl_end)
+ if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbl));
+ (unsigned int)(p - (u32 *)handle_tlbl));
- dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
+ dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
}
static void build_r3000_tlb_store_handler(void)
{
- u32 *p = handle_tlbs;
- const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
+ u32 *p = (u32 *)handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
+ memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -1972,24 +1955,23 @@ static void build_r3000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbs_end)
+ if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbs));
+ (unsigned int)(p - (u32 *)handle_tlbs));
- dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
+ dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
}
static void build_r3000_tlb_modify_handler(void)
{
- u32 *p = handle_tlbm;
- const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
+ u32 *p = (u32 *)handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
+ memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2003,14 +1985,14 @@ static void build_r3000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbm_end)
+ if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbm));
+ (unsigned int)(p - (u32 *)handle_tlbm));
- dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
+ dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -2102,12 +2084,11 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
static void build_r4000_tlb_load_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
- const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
+ memset(p, 0, handle_tlbl_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2288,25 +2269,24 @@ static void build_r4000_tlb_load_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbl_end)
+ if (p >= (u32 *)handle_tlbl_end)
panic("TLB load handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbl));
+ (unsigned int)(p - (u32 *)handle_tlbl));
- dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
+ dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
}
static void build_r4000_tlb_store_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
- const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
+ memset(p, 0, handle_tlbs_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2343,25 +2323,24 @@ static void build_r4000_tlb_store_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbs_end)
+ if (p >= (u32 *)handle_tlbs_end)
panic("TLB store handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbs));
+ (unsigned int)(p - (u32 *)handle_tlbs));
- dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
+ dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
}
static void build_r4000_tlb_modify_handler(void)
{
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
- const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
- memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
+ memset(p, 0, handle_tlbm_end - (char *)p);
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -2399,14 +2378,14 @@ static void build_r4000_tlb_modify_handler(void)
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
uasm_i_nop(&p);
- if (p >= handle_tlbm_end)
+ if (p >= (u32 *)handle_tlbm_end)
panic("TLB modify handler fastpath space exceeded");
uasm_resolve_relocs(relocs, labels);
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
- (unsigned int)(p - handle_tlbm));
+ (unsigned int)(p - (u32 *)handle_tlbm));
- dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
+ dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
}
static void flush_tlb_handlers(void)