Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/config.mk | 31
-rw-r--r--  arch/arc/cpu/arc700/Makefile | 13
-rw-r--r--  arch/arc/cpu/arc700/cache.c | 138
-rw-r--r--  arch/arc/cpu/arc700/config.mk | 7
-rw-r--r--  arch/arc/cpu/arc700/cpu.c | 47
-rw-r--r--  arch/arc/cpu/arc700/interrupts.c | 142
-rw-r--r--  arch/arc/cpu/arc700/reset.c | 19
-rw-r--r--  arch/arc/cpu/arc700/start.S | 241
-rw-r--r--  arch/arc/cpu/arc700/timer.c | 24
-rw-r--r--  arch/arc/cpu/arc700/u-boot.lds | 72
-rw-r--r--  arch/arc/include/asm/arch-arc700/hardware.h | 10
-rw-r--r--  arch/arc/include/asm/arcregs.h | 55
-rw-r--r--  arch/arc/include/asm/bitops.h | 19
-rw-r--r--  arch/arc/include/asm/byteorder.h | 23
-rw-r--r--  arch/arc/include/asm/cache.h | 23
-rw-r--r--  arch/arc/include/asm/config.h | 12
-rw-r--r--  arch/arc/include/asm/errno.h | 1
-rw-r--r--  arch/arc/include/asm/global_data.h | 19
-rw-r--r--  arch/arc/include/asm/io.h | 218
-rw-r--r--  arch/arc/include/asm/posix_types.h | 39
-rw-r--r--  arch/arc/include/asm/ptrace.h | 50
-rw-r--r--  arch/arc/include/asm/sections.h | 14
-rw-r--r--  arch/arc/include/asm/string.h | 27
-rw-r--r--  arch/arc/include/asm/types.h | 55
-rw-r--r--  arch/arc/include/asm/u-boot-arc.h | 12
-rw-r--r--  arch/arc/include/asm/u-boot.h | 15
-rw-r--r--  arch/arc/include/asm/unaligned.h | 1
-rw-r--r--  arch/arc/lib/Makefile | 16
-rw-r--r--  arch/arc/lib/bootm.c | 106
-rw-r--r--  arch/arc/lib/memcmp.S | 121
-rw-r--r--  arch/arc/lib/memcpy-700.S | 63
-rw-r--r--  arch/arc/lib/memset.S | 62
-rw-r--r--  arch/arc/lib/relocate.c | 72
-rw-r--r--  arch/arc/lib/sections.c | 21
-rw-r--r--  arch/arc/lib/strchr-700.S | 141
-rw-r--r--  arch/arc/lib/strcmp.S | 97
-rw-r--r--  arch/arc/lib/strcpy-700.S | 67
-rw-r--r--  arch/arc/lib/strlen.S | 80
38 files changed, 2173 insertions, 0 deletions
diff --git a/arch/arc/config.mk b/arch/arc/config.mk
new file mode 100644
index 0000000000..76f4f7c0dd
--- /dev/null
+++ b/arch/arc/config.mk
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+ifndef CONFIG_SYS_BIG_ENDIAN
+CONFIG_SYS_LITTLE_ENDIAN = 1
+endif
+
+ifdef CONFIG_SYS_LITTLE_ENDIAN
+CROSS_COMPILE ?= arc-buildroot-linux-uclibc-
+endif
+
+ifdef CONFIG_SYS_BIG_ENDIAN
+CROSS_COMPILE ?= arceb-buildroot-linux-uclibc-
+PLATFORM_LDFLAGS += -EB
+endif
+
+PLATFORM_CPPFLAGS += -ffixed-r25 -D__ARC__ -DCONFIG_ARC -gdwarf-2
+
+LDSCRIPT := $(SRCTREE)/$(CPUDIR)/u-boot.lds
+
+# Needed for relocation
+LDFLAGS_FINAL += -pie
+
+# Load address for standalone apps
+CONFIG_STANDALONE_LOAD_ADDR ?= 0x82000000
+
+# Support generic board on ARC
+__HAVE_ARCH_GENERIC_BOARD := y
diff --git a/arch/arc/cpu/arc700/Makefile b/arch/arc/cpu/arc700/Makefile
new file mode 100644
index 0000000000..cdc5002290
--- /dev/null
+++ b/arch/arc/cpu/arc700/Makefile
@@ -0,0 +1,13 @@
+#
+# Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+extra-y += start.o
+
+obj-y += cache.o
+obj-y += cpu.o
+obj-y += interrupts.o
+obj-y += reset.o
+obj-y += timer.o
diff --git a/arch/arc/cpu/arc700/cache.c b/arch/arc/cpu/arc700/cache.c
new file mode 100644
index 0000000000..39d522d22f
--- /dev/null
+++ b/arch/arc/cpu/arc700/cache.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <config.h>
+#include <asm/arcregs.h>
+
+/* Bit values in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE (1 << 0)
+
+/* Bit values in DC_CTRL */
+#define DC_CTRL_CACHE_DISABLE (1 << 0)
+#define DC_CTRL_INV_MODE_FLUSH (1 << 6)
+#define DC_CTRL_FLUSH_STATUS (1 << 8)
+
+int icache_status(void)
+{
+ return (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE) !=
+ IC_CTRL_CACHE_DISABLE;
+}
+
+void icache_enable(void)
+{
+ write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
+ ~IC_CTRL_CACHE_DISABLE);
+}
+
+void icache_disable(void)
+{
+ write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
+ IC_CTRL_CACHE_DISABLE);
+}
+
+void invalidate_icache_all(void)
+{
+#ifndef CONFIG_SYS_ICACHE_OFF
+ /* Any write to the IC_IVIC register triggers invalidation of the entire I$ */
+ write_aux_reg(ARC_AUX_IC_IVIC, 1);
+#endif /* CONFIG_SYS_ICACHE_OFF */
+}
+
+int dcache_status(void)
+{
+ return (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE) !=
+ DC_CTRL_CACHE_DISABLE;
+}
+
+void dcache_enable(void)
+{
+ write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
+ ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
+}
+
+void dcache_disable(void)
+{
+ write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
+ DC_CTRL_CACHE_DISABLE);
+}
+
+void flush_dcache_all(void)
+{
+ /* Flush the entire data cache */
+ write_aux_reg(ARC_AUX_DC_FLSH, 1);
+
+ /* Wait for flush to complete */
+ while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+ ;
+}
+
+#ifndef CONFIG_SYS_DCACHE_OFF
+static void dcache_flush_line(unsigned addr)
+{
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_AUX_DC_PTAG, addr);
+#endif
+ write_aux_reg(ARC_AUX_DC_FLDL, addr);
+
+ /* Wait for flush to complete */
+ while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+ ;
+
+#ifndef CONFIG_SYS_ICACHE_OFF
+ /*
+ * Invalidate I$ for the address range just flushed from D$.
+ * This way, if we execute code from that range, the fetched
+ * instructions are valid/correct.
+ */
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_AUX_IC_PTAG, addr);
+#endif
+ write_aux_reg(ARC_AUX_IC_IVIL, addr);
+#endif /* CONFIG_SYS_ICACHE_OFF */
+}
+#endif /* CONFIG_SYS_DCACHE_OFF */
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_SYS_DCACHE_OFF
+ unsigned int addr;
+
+ start = start & (~(CONFIG_SYS_CACHELINE_SIZE - 1));
+ end = end & (~(CONFIG_SYS_CACHELINE_SIZE - 1));
+
+ for (addr = start; addr <= end; addr += CONFIG_SYS_CACHELINE_SIZE)
+ dcache_flush_line(addr);
+#endif /* CONFIG_SYS_DCACHE_OFF */
+}
+
+void invalidate_dcache_range(unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_SYS_DCACHE_OFF
+ unsigned int addr;
+
+ start = start & (~(CONFIG_SYS_CACHELINE_SIZE - 1));
+ end = end & (~(CONFIG_SYS_CACHELINE_SIZE - 1));
+
+ for (addr = start; addr <= end; addr += CONFIG_SYS_CACHELINE_SIZE) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_AUX_DC_PTAG, addr);
+#endif
+ write_aux_reg(ARC_AUX_DC_IVDL, addr);
+ }
+#endif /* CONFIG_SYS_DCACHE_OFF */
+}
+
+void invalidate_dcache_all(void)
+{
+#ifndef CONFIG_SYS_DCACHE_OFF
+ /* Writing 1 to the DC_IVDC register triggers invalidation of the entire D$ */
+ write_aux_reg(ARC_AUX_DC_IVDC, 1);
+#endif /* CONFIG_SYS_DCACHE_OFF */
+}
+
+void flush_cache(unsigned long start, unsigned long size)
+{
+ flush_dcache_range(start, start + size);
+}
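The flush/invalidate pair above is what board and driver code is expected to call around DMA transfers. A minimal sketch of the outbound direction, assuming a hypothetical start_dma() driver call and buffer name (neither is part of this patch):

    static char tx_buf[512] __attribute__((aligned(ARCH_DMA_MINALIGN)));

    void send_tx_buf(void)
    {
        /* Push dirty D$ lines to RAM so the DMA engine sees the data */
        flush_dcache_range((unsigned long)tx_buf,
                           (unsigned long)tx_buf + sizeof(tx_buf));
        start_dma(tx_buf, sizeof(tx_buf));  /* hypothetical driver call */
    }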
diff --git a/arch/arc/cpu/arc700/config.mk b/arch/arc/cpu/arc700/config.mk
new file mode 100644
index 0000000000..3206ff47e3
--- /dev/null
+++ b/arch/arc/cpu/arc700/config.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+PLATFORM_CPPFLAGS += -mA7
diff --git a/arch/arc/cpu/arc700/cpu.c b/arch/arc/cpu/arc700/cpu.c
new file mode 100644
index 0000000000..50634b860f
--- /dev/null
+++ b/arch/arc/cpu/arc700/cpu.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int arch_cpu_init(void)
+{
+#ifdef CONFIG_SYS_ICACHE_OFF
+ icache_disable();
+#else
+ icache_enable();
+ invalidate_icache_all();
+#endif
+
+ flush_dcache_all();
+#ifdef CONFIG_SYS_DCACHE_OFF
+ dcache_disable();
+#else
+ dcache_enable();
+#endif
+ timer_init();
+
+ /* In simulation (ISS) the "CHIPID" and "ARCNUM" fields read as all "ff" */
+ if ((read_aux_reg(ARC_AUX_IDENTITY) & 0xffffff00) == 0xffffff00)
+ gd->arch.running_on_hw = 0;
+ else
+ gd->arch.running_on_hw = 1;
+
+ gd->cpu_clk = CONFIG_SYS_CLK_FREQ;
+ gd->ram_size = CONFIG_SYS_SDRAM_SIZE;
+
+ return 0;
+}
+
+int arch_early_init_r(void)
+{
+ gd->bd->bi_memstart = CONFIG_SYS_SDRAM_BASE;
+ gd->bd->bi_memsize = CONFIG_SYS_SDRAM_SIZE;
+ return 0;
+}
diff --git a/arch/arc/cpu/arc700/interrupts.c b/arch/arc/cpu/arc700/interrupts.c
new file mode 100644
index 0000000000..d93a6eb547
--- /dev/null
+++ b/arch/arc/cpu/arc700/interrupts.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+
+/* Bit values in STATUS32 */
+#define E1_MASK (1 << 1) /* Level 1 interrupts enable */
+#define E2_MASK (1 << 2) /* Level 2 interrupts enable */
+
+int interrupt_init(void)
+{
+ return 0;
+}
+
+/*
+ * returns true if interrupts had been enabled before we disabled them
+ */
+int disable_interrupts(void)
+{
+ int status = read_aux_reg(ARC_AUX_STATUS32);
+ int state = (status & (E1_MASK | E2_MASK)) ? 1 : 0;
+
+ status &= ~(E1_MASK | E2_MASK);
+ /* STATUS32 register is updated indirectly with "FLAG" instruction */
+ __asm__("flag %0" : : "r" (status));
+ return state;
+}
+
+void enable_interrupts(void)
+{
+ unsigned int status = read_aux_reg(ARC_AUX_STATUS32);
+
+ status |= E1_MASK | E2_MASK;
+ /* STATUS32 register is updated indirectly with "FLAG" instruction */
+ __asm__("flag %0" : : "r" (status));
+}
+
+static void print_reg_file(long *reg_rev, int start_num)
+{
+ unsigned int i;
+
+ /* Print 3 registers per line */
+ for (i = start_num; i < start_num + 25; i++) {
+ printf("r%02u: 0x%08lx\t", i, (unsigned long)*reg_rev);
+ if (((i + 1) % 3) == 0)
+ printf("\n");
+
+ /* Because pt_regs has registers reversed */
+ reg_rev--;
+ }
+
+ /* Add a newline if one was not inserted at the end of the loop above */
+ if (((i + 1) % 3) != 0)
+ printf("\n");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printf("RET:\t0x%08lx\nBLINK:\t0x%08lx\nSTAT32:\t0x%08lx\n",
+ regs->ret, regs->blink, regs->status32);
+ printf("GP: 0x%08lx\t r25: 0x%08lx\t\n", regs->r26, regs->r25);
+ printf("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", regs->bta,
+ regs->sp, regs->fp);
+ printf("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start,
+ regs->lp_end, regs->lp_count);
+
+ print_reg_file(&(regs->r0), 0);
+}
+
+void bad_mode(struct pt_regs *regs)
+{
+ if (regs)
+ show_regs(regs);
+
+ panic("Resetting CPU ...\n");
+}
+
+void do_memory_error(unsigned long address, struct pt_regs *regs)
+{
+ printf("Memory error exception @ 0x%lx\n", address);
+ bad_mode(regs);
+}
+
+void do_instruction_error(unsigned long address, struct pt_regs *regs)
+{
+ printf("Instruction error exception @ 0x%lx\n", address);
+ bad_mode(regs);
+}
+
+void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
+{
+ printf("Machine check exception @ 0x%lx\n", address);
+ bad_mode(regs);
+}
+
+void do_interrupt_handler(void)
+{
+ printf("Interrupt fired\n");
+ bad_mode(0);
+}
+
+void do_itlb_miss(struct pt_regs *regs)
+{
+ printf("I TLB miss exception\n");
+ bad_mode(regs);
+}
+
+void do_dtlb_miss(struct pt_regs *regs)
+{
+ printf("D TLB miss exception\n");
+ bad_mode(regs);
+}
+
+void do_tlb_prot_violation(unsigned long address, struct pt_regs *regs)
+{
+ printf("TLB protection violation or misaligned access @ 0x%lx\n",
+ address);
+ bad_mode(regs);
+}
+
+void do_privilege_violation(struct pt_regs *regs)
+{
+ printf("Privilege violation exception\n");
+ bad_mode(regs);
+}
+
+void do_trap(struct pt_regs *regs)
+{
+ printf("Trap exception\n");
+ bad_mode(regs);
+}
+
+void do_extension(struct pt_regs *regs)
+{
+ printf("Extension instruction exception\n");
+ bad_mode(regs);
+}
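Because disable_interrupts() reports whether interrupts were previously enabled, callers can bracket a critical section and restore the prior state afterwards. A sketch of the assumed usage pattern (caller code, not part of this patch):

    void critical_section(void)
    {
        int had_ints = disable_interrupts();

        /* ... touch state that an interrupt handler might also use ... */

        if (had_ints)
            enable_interrupts();
    }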
diff --git a/arch/arc/cpu/arc700/reset.c b/arch/arc/cpu/arc700/reset.c
new file mode 100644
index 0000000000..98ebf1d445
--- /dev/null
+++ b/arch/arc/cpu/arc700/reset.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <command.h>
+#include <common.h>
+
+int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[])
+{
+ printf("Put your restart handler here\n");
+
+#ifdef DEBUG
+ /* Stop debug session here */
+ __asm__("brk");
+#endif
+ return 0;
+}
diff --git a/arch/arc/cpu/arc700/start.S b/arch/arc/cpu/arc700/start.S
new file mode 100644
index 0000000000..563513b690
--- /dev/null
+++ b/arch/arc/cpu/arc700/start.S
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <asm/arcregs.h>
+
+/*
+ * Note on the LD/ST addressing modes with address register write-back
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSH reg
+ st.a \reg, [%sp, -4]
+.endm
+
+.macro PUSHAX aux
+ lr %r9, [\aux]
+ PUSH %r9
+.endm
+
+.macro SAVE_R1_TO_R24
+ PUSH %r1
+ PUSH %r2
+ PUSH %r3
+ PUSH %r4
+ PUSH %r5
+ PUSH %r6
+ PUSH %r7
+ PUSH %r8
+ PUSH %r9
+ PUSH %r10
+ PUSH %r11
+ PUSH %r12
+ PUSH %r13
+ PUSH %r14
+ PUSH %r15
+ PUSH %r16
+ PUSH %r17
+ PUSH %r18
+ PUSH %r19
+ PUSH %r20
+ PUSH %r21
+ PUSH %r22
+ PUSH %r23
+ PUSH %r24
+.endm
+
+.macro SAVE_ALL_SYS
+
+ st %r0, [%sp]
+ lr %r0, [%ecr]
+ st %r0, [%sp, 8] /* ECR */
+ st %sp, [%sp, 4]
+
+ SAVE_R1_TO_R24
+ PUSH %r25
+ PUSH %gp
+ PUSH %fp
+ PUSH %blink
+ PUSHAX %eret
+ PUSHAX %erstatus
+ PUSH %lp_count
+ PUSHAX %lp_end
+ PUSHAX %lp_start
+ PUSHAX %erbta
+.endm
+
+.align 4
+.globl _start
+_start:
+ /* Critical system events */
+ j reset /* 0 - 0x000 */
+ j memory_error /* 1 - 0x008 */
+ j instruction_error /* 2 - 0x010 */
+
+ /* Device interrupts */
+.rept 29
+ j interrupt_handler /* 3:31 - 0x018:0xF8 */
+.endr
+ /* Exceptions */
+ j EV_MachineCheck /* 0x100, Fatal Machine check (0x20) */
+ j EV_TLBMissI /* 0x108, Instruction TLB miss (0x21) */
+ j EV_TLBMissD /* 0x110, Data TLB miss (0x22) */
+ j EV_TLBProtV /* 0x118, Protection Violation (0x23)
+ or Misaligned Access */
+ j EV_PrivilegeV /* 0x120, Privilege Violation (0x24) */
+ j EV_Trap /* 0x128, Trap exception (0x25) */
+ j EV_Extension /* 0x130, Extn Instruction Excp (0x26) */
+
+memory_error:
+ SAVE_ALL_SYS
+ lr %r0, [%efa]
+ mov %r1, %sp
+ j do_memory_error
+
+instruction_error:
+ SAVE_ALL_SYS
+ lr %r0, [%efa]
+ mov %r1, %sp
+ j do_instruction_error
+
+interrupt_handler:
+ /* TODO: save and restore CPU context when interrupts are in use */
+ bl do_interrupt_handler
+ rtie
+
+EV_MachineCheck:
+ SAVE_ALL_SYS
+ lr %r0, [%efa]
+ mov %r1, %sp
+ j do_machine_check_fault
+
+EV_TLBMissI:
+ SAVE_ALL_SYS
+ mov %r0, %sp
+ j do_itlb_miss
+
+EV_TLBMissD:
+ SAVE_ALL_SYS
+ mov %r0, %sp
+ j do_dtlb_miss
+
+EV_TLBProtV:
+ SAVE_ALL_SYS
+ lr %r0, [%efa]
+ mov %r1, %sp
+ j do_tlb_prot_violation
+
+EV_PrivilegeV:
+ SAVE_ALL_SYS
+ mov %r0, %sp
+ j do_privilege_violation
+
+EV_Trap:
+ SAVE_ALL_SYS
+ mov %r0, %sp
+ j do_trap
+
+EV_Extension:
+ SAVE_ALL_SYS
+ mov %r0, %sp
+ j do_extension
+
+
+reset:
+ /* Setup interrupt vector base that matches "__text_start" */
+ sr __text_start, [ARC_AUX_INTR_VEC_BASE]
+
+ /* Setup stack pointer */
+ mov %sp, CONFIG_SYS_INIT_SP_ADDR
+ mov %fp, %sp
+
+ /* Clear bss */
+ mov %r0, __bss_start
+ mov %r1, __bss_end
+
+clear_bss:
+ st.ab 0, [%r0, 4]
+ brlt %r0, %r1, clear_bss
+
+ /* Zero the one and only argument of "board_init_f" */
+ mov_s %r0, 0
+ j board_init_f
+
+/*
+ * void relocate_code (addr_sp, gd, addr_moni)
+ *
+ * This "function" does not return, instead it continues in RAM
+ * after relocating the monitor code.
+ *
+ * r0 = start_addr_sp
+ * r1 = new__gd
+ * r2 = relocaddr
+ */
+.align 4
+.globl relocate_code
+relocate_code:
+ /*
+ * r0-r12 might be clobbered by C functions
+ * so we use r13-r16 for storage here
+ */
+ mov %r13, %r0 /* save addr_sp */
+ mov %r14, %r1 /* save addr of gd */
+ mov %r15, %r2 /* save addr of destination */
+
+ mov %r16, %r2 /* %r16 - relocation offset */
+ sub %r16, %r16, __image_copy_start
+
+/* Set up the stack */
+stack_setup:
+ mov %sp, %r13
+ mov %fp, %sp
+
+/* Check if monitor is loaded right in place for relocation */
+ mov %r0, __image_copy_start
+ cmp %r0, %r15 /* skip relocation if code loaded */
+ bz do_board_init_r /* in target location already */
+
+/* Copy data (__image_copy_start - __image_copy_end) to new location */
+ mov %r1, %r15
+ mov %r2, __image_copy_end
+ sub %r2, %r2, %r0 /* r2 <- amount of bytes to copy */
+ asr %r2, %r2, 2 /* r2 <- amount of words to copy */
+ mov %lp_count, %r2
+ lp copy_end
+ ld.ab %r2,[%r0,4]
+ st.ab %r2,[%r1,4]
+copy_end:
+
+/* Fix relocation-related issues */
+ bl do_elf_reloc_fixups
+#ifndef CONFIG_SYS_ICACHE_OFF
+ bl invalidate_icache_all
+#endif
+#ifndef CONFIG_SYS_DCACHE_OFF
+ bl flush_dcache_all
+#endif
+
+/* Update position of interrupt vector table */
+ lr %r0, [ARC_AUX_INTR_VEC_BASE] /* Read current position */
+ add %r0, %r0, %r16 /* Update address */
+ sr %r0, [ARC_AUX_INTR_VEC_BASE] /* Write new position */
+
+do_board_init_r:
+/* Prepare for execution of "board_init_r" in relocated monitor */
+ mov %r2, board_init_r /* old address of "board_init_r()" */
+ add %r2, %r2, %r16 /* new address of "board_init_r()" */
+ mov %r0, %r14 /* 1-st parameter: gd_t */
+ mov %r1, %r15 /* 2-nd parameter: dest_addr */
+ j [%r2]
diff --git a/arch/arc/cpu/arc700/timer.c b/arch/arc/cpu/arc700/timer.c
new file mode 100644
index 0000000000..a0acbbc01a
--- /dev/null
+++ b/arch/arc/cpu/arc700/timer.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <asm/arcregs.h>
+
+#define NH_MODE (1 << 1) /* Disable timer if CPU is halted */
+
+int timer_init(void)
+{
+ write_aux_reg(ARC_AUX_TIMER0_CTRL, NH_MODE);
+ /* Set max value for counter/timer */
+ write_aux_reg(ARC_AUX_TIMER0_LIMIT, 0xffffffff);
+ /* Set initial count value and restart counter/timer */
+ write_aux_reg(ARC_AUX_TIMER0_CNT, 0);
+ return 0;
+}
+
+unsigned long timer_read_counter(void)
+{
+ return read_aux_reg(ARC_AUX_TIMER0_CNT);
+}
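TIMER0 counts up towards the 0xffffffff limit programmed in timer_init(), so elapsed time can be measured by unsigned subtraction of two counter snapshots. A minimal sketch (the helper name is illustrative):

    unsigned long ticks_since(unsigned long start)
    {
        /* Unsigned arithmetic survives a single counter wrap-around */
        return timer_read_counter() - start;
    }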
diff --git a/arch/arc/cpu/arc700/u-boot.lds b/arch/arc/cpu/arc700/u-boot.lds
new file mode 100644
index 0000000000..2d01b21b36
--- /dev/null
+++ b/arch/arc/cpu/arc700/u-boot.lds
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+OUTPUT_FORMAT("elf32-littlearc", "elf32-littlearc", "elf32-littlearc")
+OUTPUT_ARCH(arc)
+ENTRY(_start)
+SECTIONS
+{
+ . = ALIGN(4);
+ .text : {
+ *(.__text_start)
+ *(.__image_copy_start)
+ CPUDIR/start.o (.text*)
+ *(.text*)
+ }
+
+ . = ALIGN(4);
+ .text_end :
+ {
+ *(.__text_end)
+ }
+
+ . = ALIGN(4);
+ .rodata : {
+ *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*)))
+ }
+
+ . = ALIGN(4);
+ .data : {
+ *(.data*)
+ }
+
+ . = ALIGN(4);
+ .u_boot_list : {
+ KEEP(*(SORT(.u_boot_list*)));
+ }
+
+ . = ALIGN(4);
+ .rel_dyn_start : {
+ *(.__rel_dyn_start)
+ }
+
+ .rela.dyn : {
+ *(.rela.dyn)
+ }
+
+ .rel_dyn_end : {
+ *(.__rel_dyn_end)
+ }
+
+ . = ALIGN(4);
+ .bss_start : {
+ *(.__bss_start);
+ }
+
+ .bss : {
+ *(.bss*)
+ }
+
+ .bss_end : {
+ *(.__bss_end);
+ }
+
+ . = ALIGN(4);
+ .image_copy_end : {
+ *(.__image_copy_end)
+ *(.__init_end)
+ }
+}
diff --git a/arch/arc/include/asm/arch-arc700/hardware.h b/arch/arc/include/asm/arch-arc700/hardware.h
new file mode 100644
index 0000000000..8ec13a8593
--- /dev/null
+++ b/arch/arc/include/asm/arch-arc700/hardware.h
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * This file is only required to allow compilation of the "designware_i2c"
+ * driver, which explicitly includes <asm/arch/hardware.h>.
+ */
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 0000000000..5d48d11bab
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+/*
+ * The ARC architecture has an additional address space - the auxiliary
+ * registers. These registers are mostly used for configuration purposes.
+ * They are not memory-mapped; dedicated instructions ("lr"/"sr")
+ * are used for access.
+ */
+
+#define ARC_AUX_IDENTITY 0x04
+#define ARC_AUX_STATUS32 0x0a
+
+/* Instruction cache related auxiliary registers */
+#define ARC_AUX_IC_IVIC 0x10
+#define ARC_AUX_IC_CTRL 0x11
+#define ARC_AUX_IC_IVIL 0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_AUX_IC_PTAG 0x1E
+#endif
+
+/* Timer related auxiliary registers */
+#define ARC_AUX_TIMER0_CNT 0x21 /* Timer 0 count */
+#define ARC_AUX_TIMER0_CTRL 0x22 /* Timer 0 control */
+#define ARC_AUX_TIMER0_LIMIT 0x23 /* Timer 0 limit */
+
+#define ARC_AUX_INTR_VEC_BASE 0x25
+
+/* Data cache related auxiliary registers */
+#define ARC_AUX_DC_IVDC 0x47
+#define ARC_AUX_DC_CTRL 0x48
+
+#define ARC_AUX_DC_IVDL 0x4A
+#define ARC_AUX_DC_FLSH 0x4B
+#define ARC_AUX_DC_FLDL 0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_AUX_DC_PTAG 0x5C
+#endif
+
+#ifndef __ASSEMBLY__
+/* Accessors for auxiliary registers */
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
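The accessors compile down to single lr/sr instructions via the gcc builtins. An illustrative use follows; the assumption that the core version field occupies the low byte of IDENTITY is ours, not the patch's:

    unsigned int arc_core_version(void)
    {
        /* Assumed: version field in bits [7:0] of IDENTITY */
        return read_aux_reg(ARC_AUX_IDENTITY) & 0xff;
    }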
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 0000000000..85721aaee3
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_BITOPS_H
+#define __ASM_ARC_BITOPS_H
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of an N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __ASM_ARC_BITOPS_H */
diff --git a/arch/arc/include/asm/byteorder.h b/arch/arc/include/asm/byteorder.h
new file mode 100644
index 0000000000..2fa9776ca5
--- /dev/null
+++ b/arch/arc/include/asm/byteorder.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#include <asm/types.h>
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+ #define __BYTEORDER_HAS_U64__
+ #define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+ #include <linux/byteorder/little_endian.h>
+#else
+ #include <linux/byteorder/big_endian.h>
+#endif /* __LITTLE_ENDIAN__ */
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 0000000000..16e7568ef0
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_CACHE_H
+#define __ASM_ARC_CACHE_H
+
+#include <config.h>
+
+/*
+ * The current upper bound for ARC L1 data cache line sizes is 128 bytes.
+ * We use that value for aligning DMA buffers unless the board config has
+ * specified an alternate cache line size.
+ */
+#ifdef CONFIG_SYS_CACHELINE_SIZE
+#define ARCH_DMA_MINALIGN CONFIG_SYS_CACHELINE_SIZE
+#else
+#define ARCH_DMA_MINALIGN 128
+#endif
+
+#endif /* __ASM_ARC_CACHE_H */
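ARCH_DMA_MINALIGN is intended for declaring buffers shared with DMA engines: alignment to a full cache line ensures that an invalidation cannot clobber adjacent data. A sketch of the assumed receive-path usage (buffer name is illustrative):

    static unsigned char rx_buf[1024] __attribute__((aligned(ARCH_DMA_MINALIGN)));

    void read_dma_data(void)
    {
        /* Drop stale cache lines before reading what the device wrote */
        invalidate_dcache_range((unsigned long)rx_buf,
                                (unsigned long)rx_buf + sizeof(rx_buf));
        /* rx_buf contents are now coherent with RAM */
    }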
diff --git a/arch/arc/include/asm/config.h b/arch/arc/include/asm/config.h
new file mode 100644
index 0000000000..5761def1e7
--- /dev/null
+++ b/arch/arc/include/asm/config.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_CONFIG_H_
+#define __ASM_ARC_CONFIG_H_
+
+#define CONFIG_LMB
+
+#endif /*__ASM_ARC_CONFIG_H_ */
diff --git a/arch/arc/include/asm/errno.h b/arch/arc/include/asm/errno.h
new file mode 100644
index 0000000000..4c82b503d9
--- /dev/null
+++ b/arch/arc/include/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/arch/arc/include/asm/global_data.h b/arch/arc/include/asm/global_data.h
new file mode 100644
index 0000000000..d644e80586
--- /dev/null
+++ b/arch/arc/include/asm/global_data.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_GLOBAL_DATA_H
+#define __ASM_ARC_GLOBAL_DATA_H
+
+/* Architecture-specific global data */
+struct arch_global_data {
+ int running_on_hw;
+};
+
+#include <asm-generic/global_data.h>
+
+#define DECLARE_GLOBAL_DATA_PTR register volatile gd_t *gd asm ("r25")
+
+#endif /* __ASM_ARC_GLOBAL_DATA_H */
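Pinning gd in r25 is also why config.mk passes -ffixed-r25: the compiler must never allocate that register for anything else. Any file can then reach the global data with the usual U-Boot idiom, e.g. to check the simulator flag set in arch_cpu_init() (a sketch; the function name is illustrative):

    #include <common.h>

    DECLARE_GLOBAL_DATA_PTR;

    int running_on_simulator(void)
    {
        /* arch.running_on_hw is set in arch/arc/cpu/arc700/cpu.c */
        return !gd->arch.running_on_hw;
    }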
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 0000000000..24b7337308
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_IO_H
+#define __ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+static inline void sync(void)
+{
+ /* Not yet implemented */
+}
+
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__("ldb%U1 %0, %1\n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+ return b;
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__("ldw%U1 %0, %1\n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+ return s;
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__("ld%U1 %0, %1\n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+ return w;
+}
+
+#define readb __raw_readb
+
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
+
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__("stb%U1 %0, %1\n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__("stw%U1 %0, %1\n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+}
+
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__("st%U1 %0, %1\n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+}
+
+#define writeb __raw_writeb
+#define writew(b, addr) __raw_writew(__cpu_to_le16(b), addr)
+#define writel(b, addr) __raw_writel(__cpu_to_le32(b), addr)
+
+static inline int __raw_readsb(unsigned int addr, void *data, int bytelen)
+{
+ __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "stb.ab r8, [r1, 1]\n"
+ :
+ : "r" (addr), "r" (data), "r" (bytelen)
+ : "r8");
+ return bytelen;
+}
+
+static inline int __raw_readsw(unsigned int addr, void *data, int wordlen)
+{
+ __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "stw.ab r8, [r1, 2]\n"
+ :
+ : "r" (addr), "r" (data), "r" (wordlen)
+ : "r8");
+ return wordlen;
+}
+
+static inline int __raw_readsl(unsigned int addr, void *data, int longlen)
+{
+ __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "st.ab r8, [r1, 4]\n"
+ :
+ : "r" (addr), "r" (data), "r" (longlen)
+ : "r8");
+ return longlen;
+}
+
+static inline int __raw_writesb(unsigned int addr, void *data, int bytelen)
+{
+ __asm__ __volatile__ ("1:ldb.ab r8, [r1, 1]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "st.di r8, [r0, 0]\n"
+ :
+ : "r" (addr), "r" (data), "r" (bytelen)
+ : "r8");
+ return bytelen;
+}
+
+static inline int __raw_writesw(unsigned int addr, void *data, int wordlen)
+{
+ __asm__ __volatile__ ("1:ldw.ab r8, [r1, 2]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "st.ab.di r8, [r0, 0]\n"
+ :
+ : "r" (addr), "r" (data), "r" (wordlen)
+ : "r8");
+ return wordlen;
+}
+
+static inline int __raw_writesl(unsigned int addr, void *data, int longlen)
+{
+ __asm__ __volatile__ ("1:ld.ab r8, [r1, 4]\n"
+ "sub.f r2, r2, 1\n"
+ "bnz.d 1b\n"
+ "st.ab.di r8, [r0, 0]\n"
+ :
+ : "r" (addr), "r" (data), "r" (longlen)
+ : "r8");
+ return longlen;
+}
+
+#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v) out_arch(l, le32, a, v)
+#define out_le16(a, v) out_arch(w, le16, a, v)
+
+#define in_le32(a) in_arch(l, le32, a)
+#define in_le16(a) in_arch(w, le16, a)
+
+#define out_be32(a, v) out_arch(l, be32, a, v)
+#define out_be16(a, v) out_arch(w, be16, a, v)
+
+#define in_be32(a) in_arch(l, be32, a)
+#define in_be16(a) in_arch(w, be16, a)
+
+#define out_8(a, v) __raw_writeb(v, a)
+#define in_8(a) __raw_readb(a)
+
+/*
+ * Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single call. These macros can
+ * also be used to set a multiple-bit bit pattern using a mask, by
+ * specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrbits(type, addr, clear) \
+ out_##type((addr), in_##type(addr) & ~(clear))
+
+#define setbits(type, addr, set) \
+ out_##type((addr), in_##type(addr) | (set))
+
+#define clrsetbits(type, addr, clear, set) \
+ out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
+#define setbits_be32(addr, set) setbits(be32, addr, set)
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+
+#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
+#define setbits_le32(addr, set) setbits(le32, addr, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+
+#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
+#define setbits_be16(addr, set) setbits(be16, addr, set)
+#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
+
+#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
+#define setbits_le16(addr, set) setbits(le16, addr, set)
+#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)
+
+#define clrbits_8(addr, clear) clrbits(8, addr, clear)
+#define setbits_8(addr, set) setbits(8, addr, set)
+#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
+
+#endif /* __ASM_ARC_IO_H */
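The clr/set helpers turn read-modify-write sequences on device registers into one-liners. A hedged example — the register address and bit layout are made up for illustration:

    #define CTRL_REG ((void *)0xf0000000) /* hypothetical peripheral register */

    /* Update a 2-bit divider field at bits [5:4], leaving other bits intact */
    static void set_divider(u32 div)
    {
        clrsetbits_le32(CTRL_REG, 0x3 << 4, (div & 0x3) << 4);
    }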
diff --git a/arch/arc/include/asm/posix_types.h b/arch/arc/include/asm/posix_types.h
new file mode 100644
index 0000000000..20415f0705
--- /dev/null
+++ b/arch/arc/include/asm/posix_types.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_POSIX_TYPES_H
+#define __ASM_ARC_POSIX_TYPES_H
+
+typedef unsigned short __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char *__kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+#endif /* __ASM_ARC_POSIX_TYPES_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 0000000000..8f73b31c10
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+struct pt_regs {
+ long bta;
+ long lp_start;
+ long lp_end;
+ long lp_count;
+ long status32;
+ long ret;
+ long blink;
+ long fp;
+ long r26; /* gp */
+ long r25;
+ long r24;
+ long r23;
+ long r22;
+ long r21;
+ long r20;
+ long r19;
+ long r18;
+ long r17;
+ long r16;
+ long r15;
+ long r14;
+ long r13;
+ long r12;
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long sp;
+ long ecr;
+};
+
+#endif /* __ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 0000000000..18484a17f2
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_SECTIONS_H
+#define __ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern ulong __text_end;
+
+#endif /* __ASM_ARC_SECTIONS_H */
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 0000000000..909129c333
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_STRING_H
+#define __ASM_ARC_STRING_H
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/types.h b/arch/arc/include/asm/types.h
new file mode 100644
index 0000000000..24eeb76bd6
--- /dev/null
+++ b/arch/arc/include/asm/types.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_TYPES_H
+#define __ASM_ARC_TYPES_H
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+/* DMA addresses are 32 bits wide. */
+
+typedef u32 dma_addr_t;
+
+typedef unsigned long phys_addr_t;
+typedef unsigned long phys_size_t;
+
+#endif /* __ASM_ARC_TYPES_H */
diff --git a/arch/arc/include/asm/u-boot-arc.h b/arch/arc/include/asm/u-boot-arc.h
new file mode 100644
index 0000000000..0c0e8e661d
--- /dev/null
+++ b/arch/arc/include/asm/u-boot-arc.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_U_BOOT_ARC_H__
+#define __ASM_ARC_U_BOOT_ARC_H__
+
+int arch_early_init_r(void);
+
+#endif /* __ASM_ARC_U_BOOT_ARC_H__ */
diff --git a/arch/arc/include/asm/u-boot.h b/arch/arc/include/asm/u-boot.h
new file mode 100644
index 0000000000..e354edf95d
--- /dev/null
+++ b/arch/arc/include/asm/u-boot.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __ASM_ARC_U_BOOT_H__
+#define __ASM_ARC_U_BOOT_H__
+
+#include <asm-generic/u-boot.h>
+
+/* For image.h:image_check_target_arch() */
+#define IH_ARCH_DEFAULT IH_ARCH_ARC
+
+#endif /* __ASM_ARC_U_BOOT_H__ */
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 0000000000..6cecbbb211
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1 @@
+#include <asm-generic/unaligned.h>
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 0000000000..7675f855d5
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y += sections.o
+obj-y += relocate.o
+obj-y += strchr-700.o
+obj-y += strcmp.o
+obj-y += strcpy-700.o
+obj-y += strlen.o
+obj-y += memcmp.o
+obj-y += memcpy-700.o
+obj-y += memset.o
+obj-$(CONFIG_CMD_BOOTM) += bootm.o
diff --git a/arch/arc/lib/bootm.c b/arch/arc/lib/bootm.c
new file mode 100644
index 0000000000..d185a50bd3
--- /dev/null
+++ b/arch/arc/lib/bootm.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static ulong get_sp(void)
+{
+ ulong ret;
+
+ asm("mov %0, sp" : "=r"(ret) : );
+ return ret;
+}
+
+void arch_lmb_reserve(struct lmb *lmb)
+{
+ ulong sp;
+
+ /*
+ * Booting a (Linux) kernel image
+ *
+ * Allocate space for command line and board info - the
+ * address should be as high as possible within the reach of
+ * the kernel (see CONFIG_SYS_BOOTMAPSZ settings), but in unused
+ * memory, which means far enough below the current stack
+ * pointer.
+ */
+ sp = get_sp();
+ debug("## Current stack ends at 0x%08lx ", sp);
+
+ /* adjust sp by 4K to be safe */
+ sp -= 4096;
+ lmb_reserve(lmb, sp, (CONFIG_SYS_SDRAM_BASE + gd->ram_size - sp));
+}
+
+static int cleanup_before_linux(void)
+{
+ disable_interrupts();
+ flush_dcache_all();
+ invalidate_icache_all();
+
+ return 0;
+}
+
+/* Subcommand: PREP */
+static void boot_prep_linux(bootm_headers_t *images)
+{
+ if (image_setup_linux(images))
+ hang();
+}
+
+/* Subcommand: GO */
+static void boot_jump_linux(bootm_headers_t *images, int flag)
+{
+ void (*kernel_entry)(int zero, int arch, uint params);
+ unsigned int r0, r2;
+ int fake = (flag & BOOTM_STATE_OS_FAKE_GO);
+
+ kernel_entry = (void (*)(int, int, uint))images->ep;
+
+ debug("## Transferring control to Linux (at address %08lx)...\n",
+ (ulong) kernel_entry);
+ bootstage_mark(BOOTSTAGE_ID_RUN_OS);
+
+ printf("\nStarting kernel ...%s\n\n", fake ?
+ "(fake run for tracing)" : "");
+ bootstage_mark_name(BOOTSTAGE_ID_BOOTM_HANDOFF, "start_kernel");
+
+ cleanup_before_linux();
+
+ if (IMAGE_ENABLE_OF_LIBFDT && images->ft_len) {
+ r0 = 2;
+ r2 = (unsigned int)images->ft_addr;
+ } else {
+ r0 = 1;
+ r2 = (unsigned int)getenv("bootargs");
+ }
+
+ if (!fake)
+ kernel_entry(r0, 0, r2);
+}
+
+int do_bootm_linux(int flag, int argc, char *argv[], bootm_headers_t *images)
+{
+ /* No need for those on ARC */
+ if ((flag & BOOTM_STATE_OS_BD_T) || (flag & BOOTM_STATE_OS_CMDLINE))
+ return -1;
+
+ if (flag & BOOTM_STATE_OS_PREP) {
+ boot_prep_linux(images);
+ return 0;
+ }
+
+ if (flag & (BOOTM_STATE_OS_GO | BOOTM_STATE_OS_FAKE_GO)) {
+ boot_jump_linux(images, flag);
+ return 0;
+ }
+
+ boot_prep_linux(images);
+ boot_jump_linux(images, flag);
+ return 0;
+}
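The register convention handed to the kernel is visible in boot_jump_linux(): r0 carries a magic value (2 for a device-tree boot, 1 for a legacy string boot) and r2 the corresponding pointer. A sketch of the receiving side, with invented names (U-Boot always passes the middle argument as 0):

    void arc_kernel_entry(int magic, int arch, unsigned int params)
    {
        if (magic == 2) {
            /* params = address of the flattened device tree */
        } else if (magic == 1) {
            /* params = address of the NUL-terminated "bootargs" string */
        }
    }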
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 0000000000..fa5aac5f67
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* __BIG_ENDIAN__ */
+#define WORD2 r3
+#define SHIFT r2
+#endif /* _ENDIAN__ */
+
+.global memcmp
+.align 4
+memcmp:
+ or %r12, %r0, %r1
+ asl_s %r12, %r12, 30
+ sub %r3, %r2, 1
+ brls %r2, %r12, .Lbytewise
+ ld %r4, [%r0, 0]
+ ld %r5, [%r1, 0]
+ lsr.f %lp_count, %r3, 3
+ lpne .Loop_end
+ ld_s WORD2, [%r0, 4]
+ ld_s %r12, [%r1, 4]
+ brne %r4, %r5, .Leven
+ ld.a %r4, [%r0, 8]
+ ld.a %r5, [%r1, 8]
+ brne WORD2, %r12, .Lodd
+.Loop_end:
+ asl_s SHIFT, SHIFT, 3
+ bhs_s .Last_cmp
+ brne %r4, %r5, .Leven
+ ld %r4, [%r0, 4]
+ ld %r5, [%r1, 4]
+#ifdef __LITTLE_ENDIAN__
+ nop_s
+ /* one more load latency cycle */
+.Last_cmp:
+ xor %r0, %r4, %r5
+ bset %r0, %r0, SHIFT
+ sub_s %r1, %r0, 1
+ bic_s %r1, %r1, %r0
+ norm %r1, %r1
+ b.d .Leven_cmp
+ and %r1, %r1, 24
+.Leven:
+ xor %r0, %r4, %r5
+ sub_s %r1, %r0, 1
+ bic_s %r1, %r1, %r0
+ norm %r1, %r1
+ /* slow track insn */
+ and %r1, %r1, 24
+.Leven_cmp:
+ asl %r2, %r4, %r1
+ asl %r12, %r5, %r1
+ lsr_s %r2, %r2, 1
+ lsr_s %r12, %r12, 1
+ j_s.d [%blink]
+ sub %r0, %r2, %r12
+ .balign 4
+.Lodd:
+ xor %r0, WORD2, %r12
+ sub_s %r1, %r0, 1
+ bic_s %r1, %r1, %r0
+ norm %r1, %r1
+ /* slow track insn */
+ and %r1, %r1, 24
+ asl_s %r2, %r2, %r1
+ asl_s %r12, %r12, %r1
+ lsr_s %r2, %r2, 1
+ lsr_s %r12, %r12, 1
+ j_s.d [%blink]
+ sub %r0, %r2, %r12
+#else /* __BIG_ENDIAN__ */
+.Last_cmp:
+ neg_s SHIFT, SHIFT
+ lsr %r4, %r4, SHIFT
+ lsr %r5, %r5, SHIFT
+ /* slow track insn */
+.Leven:
+ sub.f %r0, %r4, %r5
+ mov.ne %r0, 1
+ j_s.d [%blink]
+ bset.cs %r0, %r0, 31
+.Lodd:
+ cmp_s WORD2, %r12
+
+ mov_s %r0, 1
+ j_s.d [%blink]
+ bset.cs %r0, %r0, 31
+#endif /* _ENDIAN__ */
+ .balign 4
+.Lbytewise:
+ breq %r2, 0, .Lnil
+ ldb %r4, [%r0, 0]
+ ldb %r5, [%r1, 0]
+ lsr.f %lp_count, %r3
+ lpne .Lbyte_end
+ ldb_s %r3, [%r0, 1]
+ ldb %r12, [%r1, 1]
+ brne %r4, %r5, .Lbyte_even
+ ldb.a %r4, [%r0, 2]
+ ldb.a %r5, [%r1, 2]
+ brne %r3, %r12, .Lbyte_odd
+.Lbyte_end:
+ bcc .Lbyte_even
+ brne %r4, %r5, .Lbyte_even
+ ldb_s %r3, [%r0, 1]
+ ldb_s %r12, [%r1, 1]
+.Lbyte_odd:
+ j_s.d [%blink]
+ sub %r0, %r3, %r12
+.Lbyte_even:
+ j_s.d [%blink]
+ sub %r0, %r4, %r5
+.Lnil:
+ j_s.d [%blink]
+ mov %r0, 0
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 0000000000..51dd73ab8f
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+.global memcpy
+.align 4
+memcpy:
+ or %r3, %r0, %r1
+ asl_s %r3, %r3, 30
+ mov_s %r5, %r0
+ brls.d %r2, %r3, .Lcopy_bytewise
+ sub.f %r3, %r2, 1
+ ld_s %r12, [%r1, 0]
+ asr.f %lp_count, %r3, 3
+ bbit0.d %r3, 2, .Lnox4
+ bmsk_s %r2, %r2, 1
+ st.ab %r12, [%r5, 4]
+ ld.a %r12, [%r1, 4]
+.Lnox4:
+ lppnz .Lendloop
+ ld_s %r3, [%r1, 4]
+ st.ab %r12, [%r5, 4]
+ ld.a %r12, [%r1, 8]
+ st.ab %r3, [%r5, 4]
+.Lendloop:
+ breq %r2, 0, .Last_store
+ ld %r3, [%r5, 0]
+#ifdef __LITTLE_ENDIAN__
+ add3 %r2, -1, %r2
+ /* uses long immediate */
+ xor_s %r12, %r12, %r3
+ bmsk %r12, %r12, %r2
+ xor_s %r12, %r12, %r3
+#else /* __BIG_ENDIAN__ */
+ sub3 %r2, 31, %r2
+ /* uses long immediate */
+ xor_s %r3, %r3, %r12
+ bmsk %r3, %r3, %r2
+ xor_s %r12, %r12, %r3
+#endif /* _ENDIAN__ */
+.Last_store:
+ j_s.d [%blink]
+ st %r12, [%r5, 0]
+
+ .balign 4
+.Lcopy_bytewise:
+ jcs [%blink]
+ ldb_s %r12, [%r1, 0]
+ lsr.f %lp_count, %r3
+ bhs_s .Lnox1
+ stb.ab %r12, [%r5, 1]
+ ldb.a %r12, [%r1, 1]
+.Lnox1:
+ lppnz .Lendbloop
+ ldb_s %r3, [%r1, 1]
+ stb.ab %r12, [%r5, 1]
+ ldb.a %r12, [%r1, 2]
+ stb.ab %r3, [%r5, 1]
+.Lendbloop:
+ j_s.d [%blink]
+ stb %r12, [%r5, 0]
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 0000000000..017e8af0e8
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
+
+.global memset
+.align 4
+memset:
+ mov_s %r4, %r0
+ or %r12, %r0, %r2
+ bmsk.f %r12, %r12, 1
+ extb_s %r1, %r1
+ asl %r3, %r1, 8
+ beq.d .Laligned
+ or_s %r1, %r1, %r3
+ brls %r2, SMALL, .Ltiny
+ add %r3, %r2, %r0
+ stb %r1, [%r3, -1]
+ bclr_s %r3, %r3, 0
+ stw %r1, [%r3, -2]
+ bmsk.f %r12, %r0, 1
+ add_s %r2, %r2, %r12
+ sub.ne %r2, %r2, 4
+ stb.ab %r1, [%r4, 1]
+ and %r4, %r4, -2
+ stw.ab %r1, [%r4, 2]
+ and %r4, %r4, -4
+
+ .balign 4
+.Laligned:
+ asl %r3, %r1, 16
+ lsr.f %lp_count, %r2, 2
+ or_s %r1, %r1, %r3
+ lpne .Loop_end
+ st.ab %r1, [%r4, 4]
+.Loop_end:
+ j_s [%blink]
+
+ .balign 4
+.Ltiny:
+ mov.f %lp_count, %r2
+ lpne .Ltiny_end
+ stb.ab %r1, [%r4, 1]
+.Ltiny_end:
+ j_s [%blink]
+
+/*
+ * memzero: @r0 = mem, @r1 = size_t
+ * memset: @r0 = mem, @r1 = char, @r2 = size_t
+ */
+
+.global memzero
+.align 4
+memzero:
+ /* adjust bzero args to memset args */
+ mov %r2, %r1
+ mov %r1, 0
+ /* tail call, so no need to tinker with blink */
+ b memset
diff --git a/arch/arc/lib/relocate.c b/arch/arc/lib/relocate.c
new file mode 100644
index 0000000000..956aa1494e
--- /dev/null
+++ b/arch/arc/lib/relocate.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <elf.h>
+#include <asm/sections.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Base functionality is taken from the x86 version, with ARC-specific additions
+ */
+int do_elf_reloc_fixups(void)
+{
+ Elf32_Rela *re_src = (Elf32_Rela *)(&__rel_dyn_start);
+ Elf32_Rela *re_end = (Elf32_Rela *)(&__rel_dyn_end);
+
+ Elf32_Addr *offset_ptr_rom, *last_offset = NULL;
+ Elf32_Addr *offset_ptr_ram;
+
+ do {
+ /* Get the location from the relocation entry */
+ offset_ptr_rom = (Elf32_Addr *)re_src->r_offset;
+
+ /* Check that the location of the relocation is in .text */
+ if (offset_ptr_rom >= (Elf32_Addr *)CONFIG_SYS_TEXT_BASE &&
+ offset_ptr_rom > last_offset) {
+ unsigned int val;
+ /* Switch to the in-RAM version */
+ offset_ptr_ram = (Elf32_Addr *)((ulong)offset_ptr_rom +
+ gd->reloc_off);
+
+ /*
+ * Use "memcpy" because target location might be
+ * 16-bit aligned on ARC so we may need to read
+ * byte-by-byte. On attempt to read entire word by
+ * CPU throws an exception
+ */
+ memcpy(&val, offset_ptr_ram, sizeof(int));
+
+ /* If the location is in the ".text" section, swap the value */
+ if ((unsigned int)offset_ptr_rom <
+ (unsigned int)&__text_end)
+ val = (val << 16) | (val >> 16);
+
+ /* Check that the target points into .text */
+ if (val >= CONFIG_SYS_TEXT_BASE && val <=
+ (unsigned int)&__bss_end) {
+ val += gd->reloc_off;
+ /* If the location is in the ".text" section, swap the value */
+ if ((unsigned int)offset_ptr_rom <
+ (unsigned int)&__text_end)
+ val = (val << 16) | (val >> 16);
+ memcpy(offset_ptr_ram, &val, sizeof(int));
+ } else {
+ debug(" %p: rom reloc %x, ram %p, value %x, limit %x\n",
+ re_src, re_src->r_offset, offset_ptr_ram,
+ val, (unsigned int)&__bss_end);
+ }
+ } else {
+ debug(" %p: rom reloc %x, last %p\n", re_src,
+ re_src->r_offset, last_offset);
+ }
+ last_offset = offset_ptr_rom;
+
+ } while (++re_src < re_end);
+
+ return 0;
+}
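The halfword swap around the fixup exists because ARC stores instructions as a sequence of 16-bit parcels, so a 32-bit literal embedded in .text appears halfword-swapped ("middle-endian") when read back as a word. A worked sketch with a made-up value:

    static unsigned int swap_halfwords(unsigned int val)
    {
        return (val << 16) | (val >> 16);
    }

    /*
     * A pointer 0x12345678 embedded in .text is stored as 0x56781234,
     * so a fixup is: un-swap, relocate, re-swap (illustrative values):
     *
     *   patched = swap_halfwords(swap_halfwords(0x56781234) + reloc_off);
     */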
diff --git a/arch/arc/lib/sections.c b/arch/arc/lib/sections.c
new file mode 100644
index 0000000000..b0b46a4e9a
--- /dev/null
+++ b/arch/arc/lib/sections.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * For some reason the linker sets linker-generated symbols to zero in PIE
+ * mode. A work-around is to substitute linker-generated symbols with
+ * compiler-generated symbols, which are properly handled by the linker in
+ * PIE mode.
+ */
+
+char __bss_start[0] __attribute__((section(".__bss_start")));
+char __bss_end[0] __attribute__((section(".__bss_end")));
+char __image_copy_start[0] __attribute__((section(".__image_copy_start")));
+char __image_copy_end[0] __attribute__((section(".__image_copy_end")));
+char __rel_dyn_start[0] __attribute__((section(".__rel_dyn_start")));
+char __rel_dyn_end[0] __attribute__((section(".__rel_dyn_end")));
+char __text_start[0] __attribute__((section(".__text_start")));
+char __text_end[0] __attribute__((section(".__text_end")));
+char __init_end[0] __attribute__((section(".__init_end")));
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 0000000000..55fcc9fb00
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * ARC700 has a relatively long pipeline and branch prediction, so we want
+ * to avoid branches that are hard to predict. On the other hand, the
+ * presence of the norm instruction makes it easier to operate on whole
+ * words branch-free.
+ */
+
+.global strchr
+.align 4
+strchr:
+ extb_s %r1, %r1
+ asl %r5, %r1, 8
+ bmsk %r2, %r0, 1
+ or %r5, %r5, %r1
+ mov_s %r3, 0x01010101
+ breq.d %r2, %r0, .Laligned
+ asl %r4, %r5, 16
+ sub_s %r0, %r0, %r2
+ asl %r7, %r2, 3
+ ld_s %r2, [%r0]
+#ifdef __LITTLE_ENDIAN__
+ asl %r7, %r3, %r7
+#else /* __BIG_ENDIAN__ */
+ lsr %r7, %r3, %r7
+#endif /* _ENDIAN__ */
+ or %r5, %r5, %r4
+ ror %r4, %r3
+ sub %r12, %r2, %r7
+ bic_s %r12, %r12, %r2
+ and %r12, %r12, %r4
+ brne.d %r12, 0, .Lfound0_ua
+ xor %r6, %r2, %r5
+ ld.a %r2, [%r0, 4]
+ sub %r12, %r6, %r7
+ bic %r12, %r12, %r6
+#ifdef __LITTLE_ENDIAN__
+ and %r7, %r12, %r4
+ /* For speed, we want this branch to be unaligned. */
+ breq %r7, 0, .Loop
+ /* Likewise this one */
+ b .Lfound_char
+#else /* __BIG_ENDIAN__ */
+ and %r12, %r12, %r4
+ /* For speed, we want this branch to be unaligned. */
+ breq %r12, 0, .Loop
+ lsr_s %r12, %r12, 7
+ bic %r2, %r7, %r6
+ b.d .Lfound_char_b
+ and_s %r2, %r2, %r12
+#endif /* _ENDIAN__ */
+ /* We require this code address to be unaligned for speed... */
+.Laligned:
+ ld_s %r2, [%r0]
+ or %r5, %r5, %r4
+ ror %r4, %r3
+ /* ... so that this code address is aligned, for itself and ... */
+.Loop:
+ sub %r12, %r2, %r3
+ bic_s %r12, %r12, %r2
+ and %r12, %r12, %r4
+ brne.d %r12, 0, .Lfound0
+ xor %r6, %r2, %r5
+ ld.a %r2, [%r0, 4]
+ sub %r12, %r6, %r3
+ bic %r12, %r12, %r6
+ and %r7, %r12, %r4
+ breq %r7, 0, .Loop
+ /*
+ *... so that this branch is unaligned.
+ * Found searched-for character.
+ * r0 has already advanced to next word.
+ */
+#ifdef __LITTLE_ENDIAN__
+ /*
+ * We only need the information about the first matching byte
+ * (i.e. the least significant matching byte) to be exact,
+ * hence there is no problem with carry effects.
+ */
+.Lfound_char:
+ sub %r3, %r7, 1
+ bic %r3, %r3, %r7
+ norm %r2, %r3
+ sub_s %r0, %r0, 1
+ asr_s %r2, %r2, 3
+ j.d [%blink]
+ sub_s %r0, %r0, %r2
+
+ .balign 4
+.Lfound0_ua:
+ mov %r3, %r7
+.Lfound0:
+ sub %r3, %r6, %r3
+ bic %r3, %r3, %r6
+ and %r2, %r3, %r4
+ or_s %r12, %r12, %r2
+ sub_s %r3, %r12, 1
+ bic_s %r3, %r3, %r12
+ norm %r3, %r3
+ add_s %r0, %r0, 3
+ asr_s %r12, %r3, 3
+ asl.f 0, %r2, %r3
+ sub_s %r0, %r0, %r12
+ j_s.d [%blink]
+ mov.pl %r0, 0
+#else /* __BIG_ENDIAN__ */
+.Lfound_char:
+ lsr %r7, %r7, 7
+
+ bic %r2, %r7, %r6
+.Lfound_char_b:
+ norm %r2, %r2
+ sub_s %r0, %r0, 4
+ asr_s %r2, %r2, 3
+ j.d [%blink]
+ add_s %r0, %r0, %r2
+
+.Lfound0_ua:
+ mov_s %r3, %r7
+.Lfound0:
+ asl_s %r2, %r2, 7
+ or %r7, %r6, %r4
+ bic_s %r12, %r12, %r2
+ sub %r2, %r7, %r3
+ or %r2, %r2, %r6
+ bic %r12, %r2, %r12
+ bic.f %r3, %r4, %r12
+ norm %r3, %r3
+
+ add.pl %r3, %r3, 1
+ asr_s %r12, %r3, 3
+ asl.f 0, %r2, %r3
+ add_s %r0, %r0, %r12
+ j_s.d [%blink]
+ mov.mi %r0, 0
+#endif /* _ENDIAN__ */
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 0000000000..8cb7d2f18c
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * This is optimized primarily for the ARC700.
+ * It would be possible to speed up the loops by one cycle / word
+ * (respectively one cycle / byte) by forcing double source 1 alignment,
+ * unrolling by a factor of two, and speculatively loading the second
+ * word / byte of source 1; however, that would increase the overhead for
+ * loop setup / finish, and strcmp might often terminate early.
+ */
+
+.global strcmp
+.align 4
+strcmp:
+ or %r2, %r0, %r1
+ bmsk_s %r2, %r2, 1
+ brne %r2, 0, .Lcharloop
+ mov_s %r12, 0x01010101
+ ror %r5, %r12
+.Lwordloop:
+ ld.ab %r2, [%r0, 4]
+ ld.ab %r3, [%r1, 4]
+ nop_s
+ sub %r4, %r2, %r12
+ bic %r4, %r4, %r2
+ and %r4, %r4, %r5
+ brne %r4, 0, .Lfound0
+ breq %r2, %r3, .Lwordloop
+#ifdef __LITTLE_ENDIAN__
+ xor %r0, %r2, %r3 /* mask for difference */
+ sub_s %r1, %r0, 1
+ bic_s %r0, %r0, %r1 /* mask for least significant difference bit */
+ sub %r1, %r5, %r0
+ xor %r0, %r5, %r1 /* mask for least significant difference byte */
+ and_s %r2, %r2, %r0
+ and_s %r3, %r3, %r0
+#endif /* _ENDIAN__ */
+ cmp_s %r2, %r3
+ mov_s %r0, 1
+ j_s.d [%blink]
+ bset.lo %r0, %r0, 31
+
+ .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+ xor %r0, %r2, %r3 /* mask for difference */
+ or %r0, %r0, %r4 /* or in zero indicator */
+ sub_s %r1, %r0, 1
+ bic_s %r0, %r0, %r1 /* mask for least significant difference bit */
+ sub %r1, %r5, %r0
+ xor %r0, %r5, %r1 /* mask for least significant difference byte */
+ and_s %r2, %r2, %r0
+ and_s %r3, %r3, %r0
+ sub.f %r0, %r2, %r3
+ mov.hi %r0, 1
+ j_s.d [%blink]
+ bset.lo %r0, %r0, 31
+#else /* __BIG_ENDIAN__ */
+ /*
+ * The zero-detection above can mis-detect 0x01 bytes as zeroes
+ * because of carry-propagation from a less significant zero byte.
+ * We can compensate for this by checking that bit0 is zero.
+ * This compensation is not necessary in the step where we
+ * get a low estimate for r2, because in any affected bytes
+ * we already have 0x00 or 0x01, which will remain unchanged
+ * when bit 7 is cleared.
+ */
+ .balign 4
+.Lfound0:
+ lsr %r0, %r4, 8
+ lsr_s %r1, %r2
+ bic_s %r2, %r2, %r0 /* get low estimate for r2 and get ... */
+ bic_s %r0, %r0, %r1 /* <this is the adjusted mask for zeros> */
+ or_s %r3, %r3, %r0 /* ... high estimate r3 so that r2 > r3 will */
+ cmp_s %r3, %r2 /* ... be independent of trailing garbage */
+ or_s %r2, %r2, %r0 /* likewise for r3 > r2 */
+ bic_s %r3, %r3, %r0
+ rlc %r0, 0 /* r0 := r2 > r3 ? 1 : 0 */
+ cmp_s %r2, %r3
+ j_s.d [%blink]
+ bset.lo %r0, %r0, 31
+#endif /* _ENDIAN__ */
+
+ .balign 4
+.Lcharloop:
+ ldb.ab %r2,[%r0,1]
+ ldb.ab %r3,[%r1,1]
+ nop_s
+ breq %r2, 0, .Lcmpend
+ breq %r2, %r3, .Lcharloop
+.Lcmpend:
+ j_s.d [%blink]
+ sub %r0, %r2, %r3
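The word loop relies on the classic zero-byte detection trick: subtract 0x01 from each byte and check whether a 0x80 bit appears where the original byte had none. Expressed in C (illustrative, with the caveat about 0x01 carry false-positives that the big-endian comment above describes):

    static unsigned int has_zero_byte(unsigned int word)
    {
        /* Non-zero iff some byte of "word" is 0x00 */
        return (word - 0x01010101) & ~word & 0x80808080;
    }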
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 0000000000..41bb53e501
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * If dst and src are 4 byte aligned, copy 8 bytes at a time.
+ * If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+ * it 8 byte aligned. Thus, we can do a little read-ahead, without
+ * dereferencing a cache line that we should not touch.
+ * Note that short and long instructions have been scheduled to avoid
+ * branch stalls.
+ * The beq_s to r3z could be made unaligned & long to avoid a stall
+ * there, but it is not likely to be taken often, and it would also be likely
+ * to cost an unaligned mispredict at the next call.
+ */
+
+.global strcpy
+.align 4
+strcpy:
+ or %r2, %r0, %r1
+ bmsk_s %r2, %r2, 1
+ brne.d %r2, 0, charloop
+ mov_s %r10, %r0
+ ld_s %r3, [%r1, 0]
+ mov %r8, 0x01010101
+ bbit0.d %r1, 2, loop_start
+ ror %r12, %r8
+ sub %r2, %r3, %r8
+ bic_s %r2, %r2, %r3
+ tst_s %r2,%r12
+ bne r3z
+ mov_s %r4,%r3
+ .balign 4
+loop:
+ ld.a %r3, [%r1, 4]
+ st.ab %r4, [%r10, 4]
+loop_start:
+ ld.a %r4, [%r1, 4]
+ sub %r2, %r3, %r8
+ bic_s %r2, %r2, %r3
+ tst_s %r2, %r12
+ bne_s r3z
+ st.ab %r3, [%r10, 4]
+ sub %r2, %r4, %r8
+ bic %r2, %r2, %r4
+ tst %r2, %r12
+ beq loop
+ mov_s %r3, %r4
+#ifdef __LITTLE_ENDIAN__
+r3z: bmsk.f %r1, %r3, 7
+ lsr_s %r3, %r3, 8
+#else /* __BIG_ENDIAN__ */
+r3z: lsr.f %r1, %r3, 24
+ asl_s %r3, %r3, 8
+#endif /* _ENDIAN__ */
+ bne.d r3z
+ stb.ab %r1, [%r10, 1]
+ j_s [%blink]
+
+ .balign 4
+charloop:
+ ldb.ab %r3, [%r1, 1]
+ brne.d %r3, 0, charloop
+ stb.ab %r3, [%r10, 1]
+ j [%blink]
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 0000000000..666e22c0d5
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2014 Synopsys, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+.global strlen
+.align 4
+strlen:
+ or %r3, %r0, 7
+ ld %r2, [%r3, -7]
+ ld.a %r6, [%r3, -3]
+ mov %r4, 0x01010101
+ /* uses long immediate */
+#ifdef __LITTLE_ENDIAN__
+ asl_s %r1, %r0, 3
+ btst_s %r0, 2
+ asl %r7, %r4, %r1
+ ror %r5, %r4
+ sub %r1, %r2, %r7
+ bic_s %r1, %r1, %r2
+ mov.eq %r7, %r4
+ sub %r12, %r6, %r7
+ bic %r12, %r12, %r6
+ or.eq %r12, %r12, %r1
+ and %r12, %r12, %r5
+ brne %r12, 0, .Learly_end
+#else /* __BIG_ENDIAN__ */
+ ror %r5, %r4
+ btst_s %r0, 2
+ mov_s %r1, 31
+ sub3 %r7, %r1, %r0
+ sub %r1, %r2, %r4
+ bic_s %r1, %r1, %r2
+ bmsk %r1, %r1, %r7
+ sub %r12, %r6, %r4
+ bic %r12, %r12, %r6
+ bmsk.ne %r12, %r12, %r7
+ or.eq %r12, %r12, %r1
+ and %r12, %r12, %r5
+ brne %r12, 0, .Learly_end
+#endif /* _ENDIAN__ */
+
+.Loop:
+ ld_s %r2, [%r3, 4]
+ ld.a %r6, [%r3, 8]
+ /* stall for load result */
+ sub %r1, %r2, %r4
+ bic_s %r1, %r1, %r2
+ sub %r12, %r6, %r4
+ bic %r12, %r12, %r6
+ or %r12, %r12, %r1
+ and %r12, %r12, %r5
+ breq %r12, 0, .Loop
+.Lend:
+ and.f %r1, %r1, %r5
+ sub.ne %r3, %r3, 4
+ mov.eq %r1, %r12
+#ifdef __LITTLE_ENDIAN__
+ sub_s %r2, %r1, 1
+ bic_s %r2, %r2, %r1
+ norm %r1, %r2
+ sub_s %r0, %r0, 3
+ lsr_s %r1, %r1, 3
+ sub %r0, %r3, %r0
+ j_s.d [%blink]
+ sub %r0, %r0, %r1
+#else /* __BIG_ENDIAN__ */
+ lsr_s %r1, %r1, 7
+ mov.eq %r2, %r6
+ bic_s %r1, %r1, %r2
+ norm %r1, %r1
+ sub %r0, %r3, %r0
+ lsr_s %r1, %r1, 3
+ j_s.d [%blink]
+ add %r0, %r0, %r1
+#endif /* _ENDIAN */
+.Learly_end:
+ b.d .Lend
+ sub_s.ne %r1, %r1, %r1