-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/nds32/andestech-boards | 40
-rw-r--r--  Documentation/devicetree/bindings/nds32/atl2c.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/nds32/cpus.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt | 33
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  arch/nds32/Kconfig | 103
-rw-r--r--  arch/nds32/Kconfig.cpu | 174
-rw-r--r--  arch/nds32/Makefile | 67
-rw-r--r--  arch/nds32/boot/Makefile | 15
-rw-r--r--  arch/nds32/boot/dts/Makefile | 8
-rw-r--r--  arch/nds32/boot/dts/ae3xx.dts | 85
-rw-r--r--  arch/nds32/configs/defconfig | 104
-rw-r--r--  arch/nds32/include/asm/Kbuild | 55
-rw-r--r--  arch/nds32/include/asm/assembler.h | 39
-rw-r--r--  arch/nds32/include/asm/barrier.h | 15
-rw-r--r--  arch/nds32/include/asm/bitfield.h | 963
-rw-r--r--  arch/nds32/include/asm/cache.h | 12
-rw-r--r--  arch/nds32/include/asm/cache_info.h | 13
-rw-r--r--  arch/nds32/include/asm/cacheflush.h | 44
-rw-r--r--  arch/nds32/include/asm/current.h | 12
-rw-r--r--  arch/nds32/include/asm/delay.h | 39
-rw-r--r--  arch/nds32/include/asm/dma-mapping.h | 14
-rw-r--r--  arch/nds32/include/asm/elf.h | 171
-rw-r--r--  arch/nds32/include/asm/fixmap.h | 29
-rw-r--r--  arch/nds32/include/asm/futex.h | 103
-rw-r--r--  arch/nds32/include/asm/highmem.h | 65
-rw-r--r--  arch/nds32/include/asm/io.h | 83
-rw-r--r--  arch/nds32/include/asm/irqflags.h | 36
-rw-r--r--  arch/nds32/include/asm/l2_cache.h | 137
-rw-r--r--  arch/nds32/include/asm/linkage.h | 11
-rw-r--r--  arch/nds32/include/asm/memory.h | 105
-rw-r--r--  arch/nds32/include/asm/mmu.h | 12
-rw-r--r--  arch/nds32/include/asm/mmu_context.h | 68
-rw-r--r--  arch/nds32/include/asm/module.h | 11
-rw-r--r--  arch/nds32/include/asm/nds32.h | 81
-rw-r--r--  arch/nds32/include/asm/page.h | 67
-rw-r--r--  arch/nds32/include/asm/pgalloc.h | 96
-rw-r--r--  arch/nds32/include/asm/pgtable.h | 409
-rw-r--r--  arch/nds32/include/asm/proc-fns.h | 44
-rw-r--r--  arch/nds32/include/asm/processor.h | 103
-rw-r--r--  arch/nds32/include/asm/ptrace.h | 77
-rw-r--r--  arch/nds32/include/asm/shmparam.h | 19
-rw-r--r--  arch/nds32/include/asm/string.h | 17
-rw-r--r--  arch/nds32/include/asm/swab.h | 35
-rw-r--r--  arch/nds32/include/asm/syscall.h | 188
-rw-r--r--  arch/nds32/include/asm/syscalls.h | 13
-rw-r--r--  arch/nds32/include/asm/thread_info.h | 76
-rw-r--r--  arch/nds32/include/asm/tlb.h | 28
-rw-r--r--  arch/nds32/include/asm/tlbflush.h | 47
-rw-r--r--  arch/nds32/include/asm/uaccess.h | 283
-rw-r--r--  arch/nds32/include/asm/unistd.h | 6
-rw-r--r--  arch/nds32/include/asm/vdso.h | 24
-rw-r--r--  arch/nds32/include/asm/vdso_datapage.h | 36
-rw-r--r--  arch/nds32/include/asm/vdso_timer_info.h | 14
-rw-r--r--  arch/nds32/include/uapi/asm/Kbuild | 29
-rw-r--r--  arch/nds32/include/uapi/asm/auxvec.h | 12
-rw-r--r--  arch/nds32/include/uapi/asm/byteorder.h | 13
-rw-r--r--  arch/nds32/include/uapi/asm/cachectl.h | 14
-rw-r--r--  arch/nds32/include/uapi/asm/param.h | 11
-rw-r--r--  arch/nds32/include/uapi/asm/ptrace.h | 25
-rw-r--r--  arch/nds32/include/uapi/asm/sigcontext.h | 60
-rw-r--r--  arch/nds32/include/uapi/asm/unistd.h | 11
-rw-r--r--  arch/nds32/kernel/Makefile | 23
-rw-r--r--  arch/nds32/kernel/asm-offsets.c | 28
-rw-r--r--  arch/nds32/kernel/atl2c.c | 64
-rw-r--r--  arch/nds32/kernel/cacheinfo.c | 49
-rw-r--r--  arch/nds32/kernel/devtree.c | 19
-rw-r--r--  arch/nds32/kernel/dma.c | 477
-rw-r--r--  arch/nds32/kernel/ex-entry.S | 157
-rw-r--r--  arch/nds32/kernel/ex-exit.S | 184
-rw-r--r--  arch/nds32/kernel/ex-scall.S | 98
-rw-r--r--  arch/nds32/kernel/head.S | 188
-rw-r--r--  arch/nds32/kernel/irq.c | 9
-rw-r--r--  arch/nds32/kernel/module.c | 278
-rw-r--r--  arch/nds32/kernel/nds32_ksyms.c | 31
-rw-r--r--  arch/nds32/kernel/process.c | 208
-rw-r--r--  arch/nds32/kernel/ptrace.c | 119
-rw-r--r--  arch/nds32/kernel/setup.c | 363
-rw-r--r--  arch/nds32/kernel/signal.c | 324
-rw-r--r--  arch/nds32/kernel/stacktrace.c | 47
-rw-r--r--  arch/nds32/kernel/sys_nds32.c | 50
-rw-r--r--  arch/nds32/kernel/syscall_table.c | 17
-rw-r--r--  arch/nds32/kernel/time.c | 11
-rw-r--r--  arch/nds32/kernel/traps.c | 430
-rw-r--r--  arch/nds32/kernel/vdso.c | 230
-rw-r--r--  arch/nds32/kernel/vdso/Makefile | 82
-rw-r--r--  arch/nds32/kernel/vdso/datapage.S | 21
-rwxr-xr-x  arch/nds32/kernel/vdso/gen_vdso_offsets.sh | 15
-rw-r--r--  arch/nds32/kernel/vdso/gettimeofday.c | 270
-rw-r--r--  arch/nds32/kernel/vdso/note.S | 11
-rw-r--r--  arch/nds32/kernel/vdso/sigreturn.S | 19
-rw-r--r--  arch/nds32/kernel/vdso/vdso.S | 18
-rw-r--r--  arch/nds32/kernel/vdso/vdso.lds.S | 75
-rw-r--r--  arch/nds32/kernel/vmlinux.lds.S | 57
-rw-r--r--  arch/nds32/lib/Makefile | 3
-rw-r--r--  arch/nds32/lib/clear_user.S | 42
-rw-r--r--  arch/nds32/lib/copy_from_user.S | 45
-rw-r--r--  arch/nds32/lib/copy_page.S | 37
-rw-r--r--  arch/nds32/lib/copy_template.S | 69
-rw-r--r--  arch/nds32/lib/copy_to_user.S | 45
-rw-r--r--  arch/nds32/lib/memcpy.S | 30
-rw-r--r--  arch/nds32/lib/memmove.S | 70
-rw-r--r--  arch/nds32/lib/memset.S | 33
-rw-r--r--  arch/nds32/lib/memzero.S | 18
-rw-r--r--  arch/nds32/mm/Makefile | 7
-rw-r--r--  arch/nds32/mm/alignment.c | 576
-rw-r--r--  arch/nds32/mm/cacheflush.c | 322
-rw-r--r--  arch/nds32/mm/extable.c | 16
-rw-r--r--  arch/nds32/mm/fault.c | 410
-rw-r--r--  arch/nds32/mm/highmem.c | 79
-rw-r--r--  arch/nds32/mm/init.c | 277
-rw-r--r--  arch/nds32/mm/ioremap.c | 62
-rw-r--r--  arch/nds32/mm/mm-nds32.c | 90
-rw-r--r--  arch/nds32/mm/mmap.c | 73
-rw-r--r--  arch/nds32/mm/proc.c | 533
-rw-r--r--  arch/nds32/mm/tlb.c | 50
-rw-r--r--  arch/nios2/include/asm/io.h | 1
-rw-r--r--  arch/openrisc/include/asm/io.h | 3
-rw-r--r--  arch/sparc/include/asm/io_32.h | 5
-rw-r--r--  arch/sparc/kernel/ioport.c | 4
-rw-r--r--  arch/xtensa/include/asm/io.h | 1
-rw-r--r--  drivers/clocksource/Kconfig | 9
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/timer-atcpit100.c | 266
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-ativic32.c | 107
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 8
-rw-r--r--  drivers/video/console/Kconfig | 2
-rw-r--r--  include/asm-generic/io.h | 18
130 files changed, 11864 insertions, 21 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt b/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
new file mode 100644
index 000000000000..f4b4193d830e
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
@@ -0,0 +1,19 @@
+* Andestech Internal Vector Interrupt Controller
+
+The Internal Vector Interrupt Controller (IVIC) is a basic interrupt controller
+suitable for simpler SoC platforms that do not require a more sophisticated
+and larger External Vector Interrupt Controller.
+
+
+Main node required properties:
+
+- compatible : should at least contain "andestech,ativic32".
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : should be 1; refer to interrupt-controller/interrupts.txt
+
+Examples:
+ intc: interrupt-controller {
+ compatible = "andestech,ativic32";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
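+
+A device under this controller thus refers to its line with a single cell
+in an "interrupts" property. As an illustrative sketch only (the handler
+and names below are hypothetical, not part of this binding), a platform
+driver would then map and request that line with the usual kernel APIs:
+
+	int irq = platform_get_irq(pdev, 0);	/* interrupt cell -> Linux IRQ */
+
+	if (irq < 0)
+		return irq;
+	return request_irq(irq, demo_isr, 0, "demo", NULL);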
diff --git a/Documentation/devicetree/bindings/nds32/andestech-boards b/Documentation/devicetree/bindings/nds32/andestech-boards
new file mode 100644
index 000000000000..f5d75693e3c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/nds32/andestech-boards
@@ -0,0 +1,40 @@
+Andestech(nds32) AE3XX Platform
+-----------------------------------------------------------------------------
+The AE3XX prototype demonstrates the AE3XX example platform on an FPGA. It
+is composed of one Andestech(nds32) processor and the AE3XX platform IP.
+
+Required properties (in root node):
+- compatible = "andestech,ae3xx";
+
+Example:
+/dts-v1/;
+/ {
+ compatible = "andestech,ae3xx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+};
+
+Andestech(nds32) AG101P Platform
+-----------------------------------------------------------------------------
+AG101P is a generic SoC platform IP that works with any Andestech(nds32)
+processor to provide a cost-effective and high-performance solution for the
+majority of embedded systems in a variety of application domains. Users may
+simply attach their IP to one of the system buses, together with some glue
+logic, to complete a SoC solution for a specific application. With
+comprehensive simulation and design environments, users may evaluate the
+system performance of their applications and track bugs in their designs
+efficiently. The optional hardware development platform further provides a
+real system environment for early prototyping and software/hardware co-development.
+
+Required properties (in root node):
+ compatible = "andestech,ag101p";
+
+Example:
+/dts-v1/;
+/ {
+ compatible = "andestech,ag101p";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+};
diff --git a/Documentation/devicetree/bindings/nds32/atl2c.txt b/Documentation/devicetree/bindings/nds32/atl2c.txt
new file mode 100644
index 000000000000..da8ab8e7ae9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/nds32/atl2c.txt
@@ -0,0 +1,28 @@
+* Andestech L2 cache Controller
+
+The level-2 cache controller plays an important role in reducing memory latency
+for high-performance systems, such as those designs with AndesCore processors.
+In general, a level-2 cache controller enhances overall system performance
+significantly, and system power consumption may be reduced as well because
+fewer DRAM accesses are needed.
+
+This binding specifies what properties must be available in the device tree
+representation of an Andestech L2 cache controller.
+
+Required properties:
+ - compatible:
+ Usage: required
+ Value type: <string>
+ Definition: "andestech,atl2c"
+ - reg : Physical base address and size of the cache controller's memory-mapped registers
+ - cache-unified : Specifies the cache is a unified cache.
+ - cache-level : Should be set to 2 for a level 2 cache.
+
+* Example
+
+ cache-controller@e0500000 {
+ compatible = "andestech,atl2c";
+ reg = <0xe0500000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/nds32/cpus.txt b/Documentation/devicetree/bindings/nds32/cpus.txt
new file mode 100644
index 000000000000..6f9e311b6589
--- /dev/null
+++ b/Documentation/devicetree/bindings/nds32/cpus.txt
@@ -0,0 +1,38 @@
+* Andestech Processor Binding
+
+This binding specifies what properties must be available in the device tree
+representation of an Andestech Processor Core, which is the root node in the
+tree.
+
+Required properties:
+
+ - compatible:
+ Usage: required
+ Value type: <string>
+ Definition: Should be "andestech,<core_name>", "andestech,nds32v3" as fallback.
+ Must contain "andestech,nds32v3" as the most generic value, in addition to
+ one of the following identifiers for a particular CPU core:
+ "andestech,n13"
+ "andestech,n15"
+ "andestech,d15"
+ "andestech,n10"
+ "andestech,d10"
+ - device_type
+ Usage: required
+ Value type: <string>
+ Definition: must be "cpu"
+ - reg: Contains CPU index.
+ - clock-frequency: Contains the clock frequency of the CPU, in Hz.
+
+* Examples
+
+/ {
+ cpus {
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "andestech,n13", "andestech,nds32v3";
+ reg = <0x0>;
+ clock-frequency = <60000000>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt b/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt
new file mode 100644
index 000000000000..4c9ea5989e35
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/andestech,atcpit100-timer.txt
@@ -0,0 +1,33 @@
+Andestech ATCPIT100 timer
+------------------------------------------------------------------
+ATCPIT100 is a generic IP block from Andes Technology, embedded in
+Andestech AE3XX platforms and other designs.
+
+This timer is a set of compact multi-function timers, which can be
+used as pulse width modulators (PWM) as well as simple timers.
+
+It supports up to 4 PIT channels. Each PIT channel is a
+multi-function timer and provides the following usage scenarios:
+One 32-bit timer
+Two 16-bit timers
+Four 8-bit timers
+One 16-bit PWM
+One 16-bit timer and one 8-bit PWM
+Two 8-bit timers and one 8-bit PWM
+
+Required properties:
+- compatible : Should be "andestech,atcpit100"
+- reg : Address and length of the register set
+- interrupts : Reference to the timer interrupt
+- clocks : The clock that provides the tick rate for "andestech,atcpit100"
+- clock-names : should be "PCLK" for the peripheral clock source.
+
+Examples:
+
+timer0: timer@f0400000 {
+ compatible = "andestech,atcpit100";
+ reg = <0xf0400000 0x1000>;
+ interrupts = <2>;
+ clocks = <&apb>;
+ clock-names = "PCLK";
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 689f875b47fa..b587fd48b924 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -870,6 +870,17 @@ X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+ANDES ARCHITECTURE
+M: Greentime Hu <green.hu@gmail.com>
+M: Vincent Chen <deanbo422@gmail.com>
+T: git https://github.com/andestech/linux.git
+S: Supported
+F: arch/nds32/
+F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
+F: Documentation/devicetree/bindings/nds32/
+K: nds32
+N: nds32
+
ANDROID CONFIG FRAGMENTS
M: Rob Herring <robh@kernel.org>
S: Supported
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
new file mode 100644
index 000000000000..249f38d3388f
--- /dev/null
+++ b/arch/nds32/Kconfig
@@ -0,0 +1,103 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+config NDS32
+ def_bool y
+ select ARCH_WANT_FRAME_POINTERS if FTRACE
+ select CLKSRC_MMIO
+ select CLONE_BACKWARDS
+ select COMMON_CLK
+ select GENERIC_ATOMIC64
+ select GENERIC_CPU_DEVICES
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_SHOW
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_TIME_VSYSCALL
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_MEMBLOCK
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select IRQ_DOMAIN
+ select LOCKDEP_SUPPORT
+ select MODULES_USE_ELF_RELA
+ select OF
+ select OF_EARLY_FLATTREE
+ select NO_BOOTMEM
+ select NO_IOPORT_MAP
+ select RTC_LIB
+ select THREAD_INFO_IN_TASK
+ help
+ Andes(nds32) Linux support.
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_LOCKBREAK
+ def_bool y
+ depends on PREEMPT
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config FIX_EARLYCON_MEM
+ def_bool y
+
+config PGTABLE_LEVELS
+ default 2
+
+source "init/Kconfig"
+
+menu "System Type"
+source "arch/nds32/Kconfig.cpu"
+config NR_CPUS
+ int
+ default 1
+
+config MMU
+ def_bool y
+
+config NDS32_BUILTIN_DTB
+ string "Builtin DTB"
+ default ""
+ help
+ Use this option to specify the dts file of the SoC to be built into the kernel.
+endmenu
+
+menu "Kernel Features"
+source "kernel/Kconfig.preempt"
+source "mm/Kconfig"
+source "kernel/Kconfig.hz"
+endmenu
+
+menu "Executable file formats"
+source "fs/Kconfig.binfmt"
+endmenu
+
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+
+menu "Kernel hacking"
+source "lib/Kconfig.debug"
+endmenu
+
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
new file mode 100644
index 000000000000..ba44cc539da9
--- /dev/null
+++ b/arch/nds32/Kconfig.cpu
@@ -0,0 +1,174 @@
+comment "Processor Features"
+
+config CPU_BIG_ENDIAN
+ bool "Big endian"
+
+config CPU_LITTLE_ENDIAN
+ def_bool !CPU_BIG_ENDIAN
+
+config HWZOL
+ bool "hardware zero overhead loop support"
+ depends on CPU_D10 || CPU_D15
+ default n
+ help
+ The Zero-Overhead Loop mechanism reduces the instruction fetch and
+ execution overhead of loop-control instructions. If you say Y, the
+ kernel saves and restores 3 extra registers ($LB, $LC, $LE) on each
+ context switch. You do not need these registers to be saved if you can
+ make sure your user programs do not use them.
+
+ If unsure, say N.
+
+config CPU_CACHE_ALIASING
+ bool "Aliasing cache"
+ depends on CPU_N10 || CPU_D10 || CPU_N13 || CPU_V3
+ default y
+ help
+ If this CPU is using VIPT data cache and its cache way size is larger
+ than page size, say Y. If it is using PIPT data cache, say N.
+
+ If unsure, say Y.
+
+choice
+ prompt "minimum CPU type"
+ default CPU_V3
+ help
+ The data cache of the N15/D15 is implemented as PIPT and does not
+ suffer from cache aliasing. The remaining CPUs (N13, N10 and D10)
+ implement a VIPT data cache, which may cause cache aliasing if its
+ cache way size is larger than the page size. You can specify the CPU
+ type directly, or choose CPU_V3 if unsure.
+
+ A kernel built for N10 is able to run on N15, D15, N13, N10 or D10.
+ A kernel built for N15 is able to run on N15 or D15.
+ A kernel built for D10 is able to run on D10 or D15.
+ A kernel built for D15 is able to run on D15.
+ A kernel built for N13 is able to run on N15, N13 or D15.
+
+config CPU_N15
+ bool "AndesCore N15"
+config CPU_N13
+ bool "AndesCore N13"
+ select CPU_CACHE_ALIASING if ANDES_PAGE_SIZE_4KB
+config CPU_N10
+ bool "AndesCore N10"
+ select CPU_CACHE_ALIASING
+config CPU_D15
+ bool "AndesCore D15"
+config CPU_D10
+ bool "AndesCore D10"
+ select CPU_CACHE_ALIASING
+config CPU_V3
+ bool "AndesCore v3 compatible"
+ select CPU_CACHE_ALIASING
+endchoice
+choice
+ prompt "Paging -- page size "
+ default ANDES_PAGE_SIZE_4KB
+config ANDES_PAGE_SIZE_4KB
+ bool "use 4KB page size"
+config ANDES_PAGE_SIZE_8KB
+ bool "use 8KB page size"
+endchoice
+
+config CPU_ICACHE_DISABLE
+ bool "Disable I-Cache"
+ help
+ Say Y here to disable the processor instruction cache. Unless
+ you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_DISABLE
+ bool "Disable D-Cache"
+ help
+ Say Y here to disable the processor data cache. Unless
+ you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_WRITETHROUGH
+ bool "Force write through D-cache"
+ depends on !CPU_DCACHE_DISABLE
+ help
+ Say Y here to use the data cache in writethrough mode. Unless you
+ specifically require this or are unsure, say N.
+
+config WBNA
+ bool "WBNA"
+ default n
+ help
+ Say Y here to enable write-back memory with no-write-allocation policy.
+
+config ALIGNMENT_TRAP
+ bool "Kernel support unaligned access handling by sw"
+ depends on PROC_FS
+ default n
+ help
+ Andes processors cannot load/store information which is not
+ naturally aligned on the bus, i.e., a 4 byte load must start at an
+ address divisible by 4. On 32-bit Andes processors, these non-aligned
+ load/store instructions will be emulated in software if you say Y
+ here, which has a severe performance impact. With an IP-only
+ configuration it is safe to say N, otherwise say Y.
+
+config HW_SUPPORT_UNALIGNMENT_ACCESS
+ bool "Kernel support unaligned access handling by hw"
+ depends on !ALIGNMENT_TRAP
+ default n
+ help
+ Andes processors' load/store word/half-word instructions can access
+ unaligned memory locations without generating the Data Alignment
+ Check exceptions. With an IP-only configuration it is safe to say N,
+ otherwise say Y.
+
+config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU && !CPU_CACHE_ALIASING
+ help
+ The address space of Andes processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+ space as well as some memory mapped IO. That means that, if you
+ have a large amount of physical memory and/or IO, not all of the
+ memory can be "permanently mapped" by the kernel. The physical
+ memory that is not permanently mapped is called "high memory".
+
+ Depending on the selected kernel/user memory split, minimum
+ vmalloc space and actual amount of RAM, you may not need this
+ option which should result in a slightly faster kernel.
+
+ If unsure, say N.
+
+config CACHE_L2
+ bool "Support L2 cache"
+ default y
+ help
+ Say Y here to enable the L2 cache if your SoC is integrated with an L2CC.
+ If unsure, say N.
+
+menu "Memory configuration"
+
+choice
+ prompt "Memory split"
+ depends on MMU
+ default VMSPLIT_3G_OPT
+ help
+ Select the desired split between kernel and user memory.
+
+ If you are not absolutely sure what you are doing, leave this
+ option alone!
+
+ config VMSPLIT_3G
+ bool "3G/1G user/kernel split"
+ config VMSPLIT_3G_OPT
+ bool "3G/1G user/kernel split (for full 1G low memory)"
+ config VMSPLIT_2G
+ bool "2G/2G user/kernel split"
+ config VMSPLIT_1G
+ bool "1G/3G user/kernel split"
+endchoice
+
+config PAGE_OFFSET
+ hex
+ default 0x40000000 if VMSPLIT_1G
+ default 0x80000000 if VMSPLIT_2G
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0xC0000000
+
+endmenu
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
new file mode 100644
index 000000000000..91f933d5a962
--- /dev/null
+++ b/arch/nds32/Makefile
@@ -0,0 +1,67 @@
+LDFLAGS_vmlinux := --no-undefined -X
+OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+KBUILD_DEFCONFIG := defconfig
+
+comma = ,
+
+KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog)
+KBUILD_CFLAGS += -mcmodel=large
+
+KBUILD_CFLAGS +=$(arch-y) $(tune-y)
+KBUILD_AFLAGS +=$(arch-y) $(tune-y)
+
+#Default value
+head-y := arch/nds32/kernel/head.o
+textaddr-y := $(CONFIG_PAGE_OFFSET)+0xc000
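+# e.g. 0xC0000000 + 0xc000 = 0xC000C000 with the default 3G/1G split
+# (CONFIG_PAGE_OFFSET 0xC0000000)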
+
+TEXTADDR := $(textaddr-y)
+
+export TEXTADDR
+
+
+# If we have a machine-specific directory, then include it in the build.
+core-y += arch/nds32/kernel/ arch/nds32/mm/
+libs-y += arch/nds32/lib/
+LIBGCC_PATH := \
+ $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
+libs-y += $(LIBGCC_PATH)
+
+ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
+BUILTIN_DTB := y
+else
+BUILTIN_DTB := n
+endif
+
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+KBUILD_CFLAGS += $(call cc-option, -EL)
+else
+KBUILD_CFLAGS += $(call cc-option, -EB)
+endif
+
+boot := arch/nds32/boot
+core-$(BUILTIN_DTB) += $(boot)/dts/
+
+.PHONY: FORCE
+
+Image: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso $@
+
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
+
+CLEAN_FILES += include/asm-nds32/constants.h*
+
+# We use MRPROPER_FILES and CLEAN_FILES now
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+ echo ' Image - kernel image (arch/$(ARCH)/boot/Image)'
+endef
diff --git a/arch/nds32/boot/Makefile b/arch/nds32/boot/Makefile
new file mode 100644
index 000000000000..3f9b86f68d8f
--- /dev/null
+++ b/arch/nds32/boot/Makefile
@@ -0,0 +1,15 @@
+targets := Image Image.gz
+
+$(obj)/Image: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+ $(call if_changed,gzip)
+
+install: $(obj)/Image
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall: $(obj)/Image.gz
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/nds32/boot/dts/Makefile b/arch/nds32/boot/dts/Makefile
new file mode 100644
index 000000000000..d31faa8a1d50
--- /dev/null
+++ b/arch/nds32/boot/dts/Makefile
@@ -0,0 +1,8 @@
+ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_NDS32_BUILTIN_DTB)).dtb.o
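+# e.g. CONFIG_NDS32_BUILTIN_DTB="ae3xx" strips the quotes and selects ae3xx.dtb.o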
+else
+BUILTIN_DTB :=
+endif
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+
+clean-files := *.dtb *.dtb.S
diff --git a/arch/nds32/boot/dts/ae3xx.dts b/arch/nds32/boot/dts/ae3xx.dts
new file mode 100644
index 000000000000..bb39749a6673
--- /dev/null
+++ b/arch/nds32/boot/dts/ae3xx.dts
@@ -0,0 +1,85 @@
+/dts-v1/;
+/ {
+ compatible = "andestech,ae3xx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+ stdout-path = &serial0;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "andestech,n13", "andestech,nds32v3";
+ reg = <0>;
+ clock-frequency = <60000000>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ intc: interrupt-controller {
+ compatible = "andestech,ativic32";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ clock: clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <30000000>;
+ };
+
+ apb {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ serial0: serial@f0300000 {
+ compatible = "andestech,uart16550", "ns16550a";
+ reg = <0xf0300000 0x1000>;
+ interrupts = <8>;
+ clock-frequency = <14745600>;
+ reg-shift = <2>;
+ reg-offset = <32>;
+ no-loopback-test = <1>;
+ };
+
+ timer0: timer@f0400000 {
+ compatible = "andestech,atcpit100";
+ reg = <0xf0400000 0x1000>;
+ interrupts = <2>;
+ clocks = <&clock>;
+ clock-names = "PCLK";
+ };
+ };
+
+ ahb {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ L2: cache-controller@e0500000 {
+ compatible = "andestech,atl2c";
+ reg = <0xe0500000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ mac0: ethernet@e0100000 {
+ compatible = "andestech,atmac100";
+ reg = <0xe0100000 0x1000>;
+ interrupts = <18>;
+ };
+ };
+};
diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig
new file mode 100644
index 000000000000..2546d8770785
--- /dev/null
+++ b/arch/nds32/configs/defconfig
@@ -0,0 +1,104 @@
+CONFIG_CROSS_COMPILE="nds32le-linux-"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_USER_NS=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CACHE_L2 is not set
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_FTMAC100=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_ITE is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_GENERIC_PHY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_READABLE_ASM=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_STACKTRACE=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
new file mode 100644
index 000000000000..06bdf8167f5a
--- /dev/null
+++ b/arch/nds32/include/asm/Kbuild
@@ -0,0 +1,55 @@
+generic-y += asm-offsets.h
+generic-y += atomic.h
+generic-y += bitops.h
+generic-y += bitsperlong.h
+generic-y += bpf_perf_event.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += checksum.h
+generic-y += clkdev.h
+generic-y += cmpxchg.h
+generic-y += cmpxchg-local.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += gpio.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += irq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += kvm_para.h
+generic-y += limits.h
+generic-y += local.h
+generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += sizes.h
+generic-y += stat.h
+generic-y += switch_to.h
+generic-y += timex.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
diff --git a/arch/nds32/include/asm/assembler.h b/arch/nds32/include/asm/assembler.h
new file mode 100644
index 000000000000..c3855782a541
--- /dev/null
+++ b/arch/nds32/include/asm/assembler.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_ASSEMBLER_H__
+#define __NDS32_ASSEMBLER_H__
+
+.macro gie_disable
+ setgie.d
+ dsb
+.endm
+
+.macro gie_enable
+ setgie.e
+ dsb
+.endm
+
+.macro gie_save oldpsw
+ mfsr \oldpsw, $ir0
+ setgie.d
+ dsb
+.endm
+
+.macro gie_restore oldpsw
+ andi \oldpsw, \oldpsw, #0x1
+ beqz \oldpsw, 7001f
+ setgie.e
+ dsb
+7001:
+.endm
+
+
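+/* Wrap a single user-space access so that a fault in it can be fixed up:
+ * the __ex_table entry pairs the accessing instruction at local label 9999
+ * with a fixup label 9001, which every user of USER() must define.
+ */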
+#define USER(insn, reg, addr, opr) \
+9999: insn reg, addr, opr; \
+ .section __ex_table,"a"; \
+ .align 3; \
+ .long 9999b, 9001f; \
+ .previous
+
+#endif /* __NDS32_ASSEMBLER_H__ */
diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h
new file mode 100644
index 000000000000..faafc373ea6c
--- /dev/null
+++ b/arch/nds32/include/asm/barrier.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_ASM_BARRIER_H
+#define __NDS32_ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+#define mb() asm volatile("msync all":::"memory")
+#define rmb() asm volatile("msync all":::"memory")
+#define wmb() asm volatile("msync store":::"memory")
+#include <asm-generic/barrier.h>
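+
+/*
+ * Illustrative sketch only (not part of this port; the names below are
+ * hypothetical): wmb() orders the payload store before the flag store
+ * on the producer side.
+ */
+#if 0
+static int data, ready;
+
+static void publish(int value)
+{
+	data = value;	/* payload store */
+	wmb();		/* "msync store": payload visible before the flag */
+	ready = 1;	/* publish */
+}
+#endif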
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __NDS32_ASM_BARRIER_H */
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
new file mode 100644
index 000000000000..c73f71d67744
--- /dev/null
+++ b/arch/nds32/include/asm/bitfield.h
@@ -0,0 +1,963 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_BITFIELD_H__
+#define __NDS32_BITFIELD_H__
+/******************************************************************************
+ * cr0: CPU_VER (CPU Version Register)
+ *****************************************************************************/
+#define CPU_VER_offCFGID 0 /* Minor configuration */
+#define CPU_VER_offREV 16 /* Revision of the CPU version */
+#define CPU_VER_offCPUID 24 /* Major CPU versions */
+
+#define CPU_VER_mskCFGID ( 0xFFFF << CPU_VER_offCFGID )
+#define CPU_VER_mskREV ( 0xFF << CPU_VER_offREV )
+#define CPU_VER_mskCPUID ( 0xFF << CPU_VER_offCPUID )
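+
+/*
+ * Every register below follows the same off/msk pattern: a field is
+ * extracted by masking and shifting, e.g. (illustrative sketch, with
+ * cpu_ver holding the value read from cr0):
+ *
+ *	unsigned int cpuid = (cpu_ver & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
+ */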
+
+/******************************************************************************
+ * cr1: ICM_CFG (Instruction Cache/Memory Configuration Register)
+ *****************************************************************************/
+#define ICM_CFG_offISET 0 /* I-cache sets (# of cache lines) per way */
+#define ICM_CFG_offIWAY 3 /* I-cache ways */
+#define ICM_CFG_offISZ 6 /* I-cache line size */
+#define ICM_CFG_offILCK 9 /* I-cache locking support */
+#define ICM_CFG_offILMB 10 /* On-chip ILM banks */
+#define ICM_CFG_offBSAV 13 /* ILM base register alignment version */
+/* bit 15:31 reserved */
+
+#define ICM_CFG_mskISET ( 0x7 << ICM_CFG_offISET )
+#define ICM_CFG_mskIWAY ( 0x7 << ICM_CFG_offIWAY )
+#define ICM_CFG_mskISZ ( 0x7 << ICM_CFG_offISZ )
+#define ICM_CFG_mskILCK ( 0x1 << ICM_CFG_offILCK )
+#define ICM_CFG_mskILMB ( 0x7 << ICM_CFG_offILMB )
+#define ICM_CFG_mskBSAV ( 0x3 << ICM_CFG_offBSAV )
+
+/******************************************************************************
+ * cr2: DCM_CFG (Data Cache/Memory Configuration Register)
+ *****************************************************************************/
+#define DCM_CFG_offDSET 0 /* D-cache sets (# of cache lines) per way */
+#define DCM_CFG_offDWAY 3 /* D-cache ways */
+#define DCM_CFG_offDSZ 6 /* D-cache line size */
+#define DCM_CFG_offDLCK 9 /* D-cache locking support */
+#define DCM_CFG_offDLMB 10 /* On-chip DLM banks */
+#define DCM_CFG_offBSAV 13 /* DLM base register alignment version */
+/* bit 15:31 reserved */
+
+#define DCM_CFG_mskDSET ( 0x7 << DCM_CFG_offDSET )
+#define DCM_CFG_mskDWAY ( 0x7 << DCM_CFG_offDWAY )
+#define DCM_CFG_mskDSZ ( 0x7 << DCM_CFG_offDSZ )
+#define DCM_CFG_mskDLCK ( 0x1 << DCM_CFG_offDLCK )
+#define DCM_CFG_mskDLMB ( 0x7 << DCM_CFG_offDLMB )
+#define DCM_CFG_mskBSAV ( 0x3 << DCM_CFG_offBSAV )
+
+/******************************************************************************
+ * cr3: MMU_CFG (MMU Configuration Register)
+ *****************************************************************************/
+#define MMU_CFG_offMMPS 0 /* Memory management protection scheme */
+#define MMU_CFG_offMMPV 2 /* Memory management protection version number */
+#define MMU_CFG_offFATB 7 /* Fully-associative or non-fully-associative TLB */
+
+#define MMU_CFG_offTBW 8 /* TLB ways (non-fully-associative TLB) */
+#define MMU_CFG_offTBS 11 /* TLB sets per way (non-fully-associative TLB) */
+/* bit 14:14 reserved */
+
+#define MMU_CFG_offEP8MIN4 15 /* 8KB page supported while minimum page is 4KB */
+#define MMU_CFG_offfEPSZ 16 /* Extra page size supported */
+#define MMU_CFG_offTLBLCK 24 /* TLB locking support */
+#define MMU_CFG_offHPTWK 25 /* Hardware Page Table Walker implemented */
+#define MMU_CFG_offDE 26 /* Default endian */
+#define MMU_CFG_offNTPT 27 /* Partitions for non-translated attributes */
+#define MMU_CFG_offIVTB 28 /* Invisible TLB */
+#define MMU_CFG_offVLPT 29 /* VLPT for fast TLB fill handling implemented */
+#define MMU_CFG_offNTME 30 /* Non-translated VA to PA mapping */
+/* bit 31 reserved */
+
+#define MMU_CFG_mskMMPS ( 0x3 << MMU_CFG_offMMPS )
+#define MMU_CFG_mskMMPV ( 0x1F << MMU_CFG_offMMPV )
+#define MMU_CFG_mskFATB ( 0x1 << MMU_CFG_offFATB )
+#define MMU_CFG_mskTBW ( 0x7 << MMU_CFG_offTBW )
+#define MMU_CFG_mskTBS ( 0x7 << MMU_CFG_offTBS )
+#define MMU_CFG_mskEP8MIN4 ( 0x1 << MMU_CFG_offEP8MIN4 )
+#define MMU_CFG_mskfEPSZ ( 0xFF << MMU_CFG_offfEPSZ )
+#define MMU_CFG_mskTLBLCK ( 0x1 << MMU_CFG_offTLBLCK )
+#define MMU_CFG_mskHPTWK ( 0x1 << MMU_CFG_offHPTWK )
+#define MMU_CFG_mskDE ( 0x1 << MMU_CFG_offDE )
+#define MMU_CFG_mskNTPT ( 0x1 << MMU_CFG_offNTPT )
+#define MMU_CFG_mskIVTB ( 0x1 << MMU_CFG_offIVTB )
+#define MMU_CFG_mskVLPT ( 0x1 << MMU_CFG_offVLPT )
+#define MMU_CFG_mskNTME ( 0x1 << MMU_CFG_offNTME )
+
+/******************************************************************************
+ * cr4: MSC_CFG (Misc Configuration Register)
+ *****************************************************************************/
+#define MSC_CFG_offEDM 0
+#define MSC_CFG_offLMDMA 1
+#define MSC_CFG_offPFM 2
+#define MSC_CFG_offHSMP 3
+#define MSC_CFG_offTRACE 4
+#define MSC_CFG_offDIV 5
+#define MSC_CFG_offMAC 6
+#define MSC_CFG_offAUDIO 7
+#define MSC_CFG_offL2C 9
+#define MSC_CFG_offRDREG 10
+#define MSC_CFG_offADR24 11
+#define MSC_CFG_offINTLC 12
+#define MSC_CFG_offBASEV 13
+#define MSC_CFG_offNOD 16
+/* bit 17:31 reserved */
+
+#define MSC_CFG_mskEDM ( 0x1 << MSC_CFG_offEDM )
+#define MSC_CFG_mskLMDMA ( 0x1 << MSC_CFG_offLMDMA )
+#define MSC_CFG_mskPFM ( 0x1 << MSC_CFG_offPFM )
+#define MSC_CFG_mskHSMP ( 0x1 << MSC_CFG_offHSMP )
+#define MSC_CFG_mskTRACE ( 0x1 << MSC_CFG_offTRACE )
+#define MSC_CFG_mskDIV ( 0x1 << MSC_CFG_offDIV )
+#define MSC_CFG_mskMAC ( 0x1 << MSC_CFG_offMAC )
+#define MSC_CFG_mskAUDIO ( 0x3 << MSC_CFG_offAUDIO )
+#define MSC_CFG_mskL2C ( 0x1 << MSC_CFG_offL2C )
+#define MSC_CFG_mskRDREG ( 0x1 << MSC_CFG_offRDREG )
+#define MSC_CFG_mskADR24 ( 0x1 << MSC_CFG_offADR24 )
+#define MSC_CFG_mskINTLC ( 0x1 << MSC_CFG_offINTLC )
+#define MSC_CFG_mskBASEV ( 0x7 << MSC_CFG_offBASEV )
+#define MSC_CFG_mskNOD ( 0x1 << MSC_CFG_offNOD )
+
+/******************************************************************************
+ * cr5: CORE_CFG (Core Identification Register)
+ *****************************************************************************/
+#define CORE_ID_offCOREID 0
+/* bit 4:31 reserved */
+
+#define CORE_ID_mskCOREID ( 0xF << CORE_ID_offCOREID )
+
+/******************************************************************************
+ * cr6: FUCOP_EXIST (FPU and Coprocessor Existence Configuration Register)
+ *****************************************************************************/
+#define FUCOP_EXIST_offCP0EX 0
+#define FUCOP_EXIST_offCP1EX 1
+#define FUCOP_EXIST_offCP2EX 2
+#define FUCOP_EXIST_offCP3EX 3
+#define FUCOP_EXIST_offCP0ISFPU 31
+
+#define FUCOP_EXIST_mskCP0EX ( 0x1 << FUCOP_EXIST_offCP0EX )
+#define FUCOP_EXIST_mskCP1EX ( 0x1 << FUCOP_EXIST_offCP1EX )
+#define FUCOP_EXIST_mskCP2EX ( 0x1 << FUCOP_EXIST_offCP2EX )
+#define FUCOP_EXIST_mskCP3EX ( 0x1 << FUCOP_EXIST_offCP3EX )
+#define FUCOP_EXIST_mskCP0ISFPU ( 0x1 << FUCOP_EXIST_offCP0ISFPU )
+
+/******************************************************************************
+ * ir0: PSW (Processor Status Word Register)
+ * ir1: IPSW (Interruption PSW Register)
+ * ir2: P_IPSW (Previous IPSW Register)
+ *****************************************************************************/
+#define PSW_offGIE 0 /* Global Interrupt Enable */
+#define PSW_offINTL 1 /* Interruption Stack Level */
+#define PSW_offPOM 3 /* Processor Operation Mode, User/Superuser */
+#define PSW_offBE 5 /* Endianness for data memory access, 1:MSB, 0:LSB */
+#define PSW_offIT 6 /* Enable instruction address translation */
+#define PSW_offDT 7 /* Enable data address translation */
+#define PSW_offIME 8 /* Instruction Machine Error flag */
+#define PSW_offDME 9 /* Data Machine Error flag */
+#define PSW_offDEX 10 /* Debug Exception */
+#define PSW_offHSS 11 /* Hardware Single Stepping */
+#define PSW_offDRBE 12 /* Device Register Endian Mode */
+#define PSW_offAEN 13 /* Audio ISA special feature */
+#define PSW_offWBNA 14 /* Write Back Non-Allocate */
+#define PSW_offIFCON 15 /* IFC On */
+#define PSW_offCPL 16 /* Current Priority Level */
+/* bit 19:31 reserved */
+
+#define PSW_mskGIE ( 0x1 << PSW_offGIE )
+#define PSW_mskINTL ( 0x3 << PSW_offINTL )
+#define PSW_mskPOM ( 0x3 << PSW_offPOM )
+#define PSW_mskBE ( 0x1 << PSW_offBE )
+#define PSW_mskIT ( 0x1 << PSW_offIT )
+#define PSW_mskDT ( 0x1 << PSW_offDT )
+#define PSW_mskIME ( 0x1 << PSW_offIME )
+#define PSW_mskDME ( 0x1 << PSW_offDME )
+#define PSW_mskDEX ( 0x1 << PSW_offDEX )
+#define PSW_mskHSS ( 0x1 << PSW_offHSS )
+#define PSW_mskDRBE ( 0x1 << PSW_offDRBE )
+#define PSW_mskAEN ( 0x1 << PSW_offAEN )
+#define PSW_mskWBNA ( 0x1 << PSW_offWBNA )
+#define PSW_mskIFCON ( 0x1 << PSW_offIFCON )
+#define PSW_mskCPL ( 0x7 << PSW_offCPL )
+
+#define PSW_SYSTEM ( 1 << PSW_offPOM )
+#define PSW_INTL_1 ( 1 << PSW_offINTL )
+#define PSW_CPL_NO ( 0 << PSW_offCPL )
+#define PSW_CPL_ANY ( 7 << PSW_offCPL )
+
+#define PSW_clr (PSW_mskGIE|PSW_mskINTL|PSW_mskPOM|PSW_mskIT|PSW_mskDT|PSW_mskIME|PSW_mskWBNA)
+#ifdef __NDS32_EB__
+#ifdef CONFIG_WBNA
+#define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE)
+#else
+#define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT|PSW_mskBE)
+#endif
+#else
+#ifdef CONFIG_WBNA
+#define PSW_init (PSW_mskWBNA|(1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT)
+#else
+#define PSW_init ((1<<PSW_offINTL)|(1<<PSW_offPOM)|PSW_mskIT|PSW_mskDT)
+#endif
+#endif
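+/* PSW_init thus selects INTL=1, superuser mode (POM=1) and instruction/data
+ * address translation (IT/DT), plus BE and/or WBNA when configured above. */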
+/******************************************************************************
+ * ir3: IVB (Interruption Vector Base Register)
+ *****************************************************************************/
+/* bit 0 reserved */
+#define IVB_offNIVIC 1 /* Number of inputs for the IVIC controller */
+#define IVB_offIVIC_VER 11 /* IVIC Version */
+#define IVB_offEVIC 13 /* External Vector Interrupt Controller mode */
+#define IVB_offESZ 14 /* Size of each vector entry */
+#define IVB_offIVBASE 16 /* BasePA of interrupt vector table */
+
+#define IVB_mskNIVIC ( 0x7 << IVB_offNIVIC )
+#define IVB_mskIVIC_VER ( 0x3 << IVB_offIVIC_VER )
+#define IVB_mskEVIC ( 0x1 << IVB_offEVIC )
+#define IVB_mskESZ ( 0x3 << IVB_offESZ )
+#define IVB_mskIVBASE ( 0xFFFF << IVB_offIVBASE )
+
+#define IVB_valESZ4 0
+#define IVB_valESZ16 1
+#define IVB_valESZ64 2
+#define IVB_valESZ256 3
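+/* i.e. a vector entry size of 4 << (2 * ESZ) bytes */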
+/******************************************************************************
+ * ir4: EVA (Exception Virtual Address Register)
+ * ir5: P_EVA (Previous EVA Register)
+ *****************************************************************************/
+
+ /* This register contains the VA that causes the exception */
+
+/******************************************************************************
+ * ir6: ITYPE (Interruption Type Register)
+ * ir7: P_ITYPE (Previous ITYPE Register)
+ *****************************************************************************/
+#define ITYPE_offETYPE 0 /* Exception Type */
+#define ITYPE_offINST 4 /* Exception caused by insn fetch or data access */
+/* bit 5:15 reserved */
+#define ITYPE_offVECTOR 5 /* Vector */
+#define ITYPE_offSWID 16 /* SWID of debugging exception */
+/* bit 31:31 reserved */
+
+#define ITYPE_mskETYPE ( 0xF << ITYPE_offETYPE )
+#define ITYPE_mskINST ( 0x1 << ITYPE_offINST )
+#define ITYPE_mskVECTOR ( 0x7F << ITYPE_offVECTOR )
+#define ITYPE_mskSWID ( 0x7FFF << ITYPE_offSWID )
+
+/* Additional definitions for ITYPE register */
+#define ITYPE_offSTYPE 16 /* Arithmetic Sub Type */
+#define ITYPE_offCPID 20 /* Co-Processor ID which generated the exception */
+
+#define ITYPE_mskSTYPE ( 0xF << ITYPE_offSTYPE )
+#define ITYPE_mskCPID ( 0x3 << ITYPE_offCPID )
+
+#define NDS32_VECTOR_mskNONEXCEPTION 0x78
+#define NDS32_VECTOR_offEXCEPTION 8
+#define NDS32_VECTOR_offINTERRUPT 9
+
+/* Interrupt vector entry */
+#define ENTRY_RESET_NMI 0
+#define ENTRY_TLB_FILL 1
+#define ENTRY_PTE_NOT_PRESENT 2
+#define ENTRY_TLB_MISC 3
+#define ENTRY_TLB_VLPT_MISS 4
+#define ENTRY_MACHINE_ERROR 5
+#define ENTRY_DEBUG_RELATED 6
+#define ENTRY_GENERAL_EXCPETION 7
+#define ENTRY_SYSCALL 8
+
+/* PTE not present exception definition */
+#define ETYPE_NON_LEAF_PTE_NOT_PRESENT 0
+#define ETYPE_LEAF_PTE_NOT_PRESENT 1
+
+/* General exception ETYPE definition */
+#define ETYPE_ALIGNMENT_CHECK 0
+#define ETYPE_RESERVED_INSTRUCTION 1
+#define ETYPE_TRAP 2
+#define ETYPE_ARITHMETIC 3
+#define ETYPE_PRECISE_BUS_ERROR 4
+#define ETYPE_IMPRECISE_BUS_ERROR 5
+#define ETYPE_COPROCESSOR 6
+#define ETYPE_RESERVED_VALUE 7
+#define ETYPE_NONEXISTENT_MEM_ADDRESS 8
+#define ETYPE_MPZIU_CONTROL 9
+#define ETYPE_NEXT_PRECISE_STACK_OFL 10
+
+/* Kernel reserved software IDs */
+#define SWID_RAISE_INTERRUPT_LEVEL 0x1a /* SWID_RAISE_INTERRUPT_LEVEL is used to
+ * raise interrupt level for debug exception
+ */
+
+/******************************************************************************
+ * ir8: MERR (Machine Error Log Register)
+ *****************************************************************************/
+/* bit 0:30 reserved */
+#define MERR_offBUSERR 31 /* Bus error caused by a load insn */
+
+#define MERR_mskBUSERR ( 0x1 << MERR_offBUSERR )
+
+/******************************************************************************
+ * ir9: IPC (Interruption Program Counter Register)
+ * ir10: P_IPC (Previous IPC Register)
+ * ir11: OIPC (Overflow Interruption Program Counter Register)
+ *****************************************************************************/
+
+ /* This is the shadow stack register of the Program Counter */
+
+/******************************************************************************
+ * ir12: P_P0 (Previous P0 Register)
+ * ir13: P_P1 (Previous P1 Register)
+ *****************************************************************************/
+
+ /* These are shadow registers of $p0 and $p1 */
+
+/******************************************************************************
+ * ir14: INT_MASK (Interruption Masking Register)
+ *****************************************************************************/
+#define INT_MASK_offH0IM 0 /* Hardware Interrupt 0 Mask bit */
+#define INT_MASK_offH1IM 1 /* Hardware Interrupt 1 Mask bit */
+#define INT_MASK_offH2IM 2 /* Hardware Interrupt 2 Mask bit */
+#define INT_MASK_offH3IM 3 /* Hardware Interrupt 3 Mask bit */
+#define INT_MASK_offH4IM 4 /* Hardware Interrupt 4 Mask bit */
+#define INT_MASK_offH5IM 5 /* Hardware Interrupt 5 Mask bit */
+/* bit 6:15 reserved */
+#define INT_MASK_offSIM 16 /* Software Interrupt Mask bit */
+/* bit 17:29 reserved */
+#define INT_MASK_offIDIVZE 30 /* Enable detection for Divide-By-Zero */
+#define INT_MASK_offDSSIM 31 /* Default Single Stepping Interruption Mask */
+
+#define INT_MASK_mskH0IM ( 0x1 << INT_MASK_offH0IM )
+#define INT_MASK_mskH1IM ( 0x1 << INT_MASK_offH1IM )
+#define INT_MASK_mskH2IM ( 0x1 << INT_MASK_offH2IM )
+#define INT_MASK_mskH3IM ( 0x1 << INT_MASK_offH3IM )
+#define INT_MASK_mskH4IM ( 0x1 << INT_MASK_offH4IM )
+#define INT_MASK_mskH5IM ( 0x1 << INT_MASK_offH5IM )
+#define INT_MASK_mskSIM ( 0x1 << INT_MASK_offSIM )
+#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE )
+#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM )
+
+#define INT_MASK_INITAIAL_VAL 0x10003
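+/* 0x10003 = INT_MASK_mskSIM | INT_MASK_mskH1IM | INT_MASK_mskH0IM */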
+
+/******************************************************************************
+ * ir15: INT_PEND (Interrupt Pending Register)
+ *****************************************************************************/
+#define INT_PEND_offH0I 0 /* Hardware Interrupt 0 pending bit */
+#define INT_PEND_offH1I 1 /* Hardware Interrupt 1 pending bit */
+#define INT_PEND_offH2I 2 /* Hardware Interrupt 2 pending bit */
+#define INT_PEND_offH3I 3 /* Hardware Interrupt 3 pending bit */
+#define INT_PEND_offH4I 4 /* Hardware Interrupt 4 pending bit */
+#define INT_PEND_offH5I 5 /* Hardware Interrupt 5 pending bit */
+
+#define INT_PEND_offCIPL 0 /* Current Interrupt Priority Level */
+
+/* bit 6:15 reserved */
+#define INT_PEND_offSWI 16 /* Software Interrupt pending bit */
+/* bit 17:31 reserved */
+
+#define INT_PEND_mskH0I ( 0x1 << INT_PEND_offH0I )
+#define INT_PEND_mskH1I ( 0x1 << INT_PEND_offH1I )
+#define INT_PEND_mskH2I ( 0x1 << INT_PEND_offH2I )
+#define INT_PEND_mskH3I ( 0x1 << INT_PEND_offH3I )
+#define INT_PEND_mskH4I ( 0x1 << INT_PEND_offH4I )
+#define INT_PEND_mskH5I ( 0x1 << INT_PEND_offH5I )
+#define INT_PEND_mskCIPL ( 0x1 << INT_PEND_offCIPL )
+#define INT_PEND_mskSWI ( 0x1 << INT_PEND_offSWI )
+
+/******************************************************************************
+ * mr0: MMU_CTL (MMU Control Register)
+ *****************************************************************************/
+#define MMU_CTL_offD 0 /* Default minimum page size */
+#define MMU_CTL_offNTC0 1 /* Non-Translated Cacheability of partition 0 */
+#define MMU_CTL_offNTC1 3 /* Non-Translated Cacheability of partition 1 */
+#define MMU_CTL_offNTC2 5 /* Non-Translated Cacheability of partition 2 */
+#define MMU_CTL_offNTC3 7 /* Non-Translated Cacheability of partition 3 */
+#define MMU_CTL_offTBALCK 9 /* TLB all-lock resolution scheme */
+#define MMU_CTL_offMPZIU 10 /* Multiple Page Size In Use bit */
+#define MMU_CTL_offNTM0 11 /* Non-Translated VA to PA of partition 0 */
+#define MMU_CTL_offNTM1 13 /* Non-Translated VA to PA of partition 1 */
+#define MMU_CTL_offNTM2 15 /* Non-Translated VA to PA of partition 2 */
+#define MMU_CTL_offNTM3 17 /* Non-Translated VA to PA of partition 3 */
+#define MMU_CTL_offUNA 23 /* Unaligned access */
+/* bit 24:31 reserved */
+
+#define MMU_CTL_mskD ( 0x1 << MMU_CTL_offD )
+#define MMU_CTL_mskNTC0 ( 0x3 << MMU_CTL_offNTC0 )
+#define MMU_CTL_mskNTC1 ( 0x3 << MMU_CTL_offNTC1 )
+#define MMU_CTL_mskNTC2 ( 0x3 << MMU_CTL_offNTC2 )
+#define MMU_CTL_mskNTC3 ( 0x3 << MMU_CTL_offNTC3 )
+#define MMU_CTL_mskTBALCK ( 0x1 << MMU_CTL_offTBALCK )
+#define MMU_CTL_mskMPZIU ( 0x1 << MMU_CTL_offMPZIU )
+#define MMU_CTL_mskNTM0 ( 0x3 << MMU_CTL_offNTM0 )
+#define MMU_CTL_mskNTM1 ( 0x3 << MMU_CTL_offNTM1 )
+#define MMU_CTL_mskNTM2 ( 0x3 << MMU_CTL_offNTM2 )
+#define MMU_CTL_mskNTM3 ( 0x3 << MMU_CTL_offNTM3 )
+
+#define MMU_CTL_D4KB 0
+#define MMU_CTL_D8KB 1
+#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA )
+
+#define MMU_CTL_CACHEABLE_WB 2
+#define MMU_CTL_CACHEABLE_WT 3
+
+/******************************************************************************
+ * mr1: L1_PPTB (L1 Physical Page Table Base Register)
+ *****************************************************************************/
+#define L1_PPTB_offNV 0 /* Enable Hardware Page Table Walker (HPTWK) */
+/* bit 1:11 reserved */
+#define L1_PPTB_offBASE 12 /* First level physical page table base address */
+
+#define L1_PPTB_mskNV ( 0x1 << L1_PPTB_offNV )
+#define L1_PPTB_mskBASE ( 0xFFFFF << L1_PPTB_offBASE )
+
+/******************************************************************************
+ * mr2: TLB_VPN (TLB Access VPN Register)
+ *****************************************************************************/
+/* bit 0:11 reserved */
+#define TLB_VPN_offVPN 12 /* Virtual Page Number */
+
+#define TLB_VPN_mskVPN ( 0xFFFFF << TLB_VPN_offVPN )
+
+/******************************************************************************
+ * mr3: TLB_DATA (TLB Access Data Register)
+ *****************************************************************************/
+#define TLB_DATA_offV 0 /* PTE is valid and present */
+#define TLB_DATA_offM 1 /* Page read/write access privilege */
+#define TLB_DATA_offD 4 /* Dirty bit */
+#define TLB_DATA_offX 5 /* Executable bit */
+#define TLB_DATA_offA 6 /* Access bit */
+#define TLB_DATA_offG 7 /* Global page (shared across contexts) */
+#define TLB_DATA_offC 8 /* Cacheability attribute */
+/* bit 11:11 reserved */
+#define TLB_DATA_offPPN 12 /* Physical Page Number */
+
+#define TLB_DATA_mskV ( 0x1 << TLB_DATA_offV )
+#define TLB_DATA_mskM ( 0x7 << TLB_DATA_offM )
+#define TLB_DATA_mskD ( 0x1 << TLB_DATA_offD )
+#define TLB_DATA_mskX ( 0x1 << TLB_DATA_offX )
+#define TLB_DATA_mskA ( 0x1 << TLB_DATA_offA )
+#define TLB_DATA_mskG ( 0x1 << TLB_DATA_offG )
+#define TLB_DATA_mskC ( 0x7 << TLB_DATA_offC )
+#define TLB_DATA_mskPPN ( 0xFFFFF << TLB_DATA_offPPN )
+
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|TLB_DATA_mskC)
+#else
+#define TLB_DATA_kernel_text_attr (TLB_DATA_mskV|TLB_DATA_mskM|TLB_DATA_mskD|TLB_DATA_mskX|TLB_DATA_mskG|(0x6 << TLB_DATA_offC))
+#endif
+
+/******************************************************************************
+ * mr4: TLB_MISC (TLB Access Misc Register)
+ *****************************************************************************/
+#define TLB_MISC_offACC_PSZ 0 /* Page size of a PTE entry */
+#define TLB_MISC_offCID 4 /* Context id */
+/* bit 13:31 reserved */
+
+#define TLB_MISC_mskACC_PSZ ( 0xF << TLB_MISC_offACC_PSZ )
+#define TLB_MISC_mskCID ( 0x1FF << TLB_MISC_offCID )
+
+/******************************************************************************
+ * mr5: VLPT_IDX (Virtual Linear Page Table Index Register)
+ *****************************************************************************/
+#define VLPT_IDX_offZERO 0 /* Always 0 */
+#define VLPT_IDX_offEVPN 2 /* Exception Virtual Page Number */
+#define VLPT_IDX_offVLPTB 22 /* Base VA of VLPT */
+
+#define VLPT_IDX_mskZERO ( 0x3 << VLPT_IDX_offZERO )
+#define VLPT_IDX_mskEVPN ( 0xFFFFF << VLPT_IDX_offEVPN )
+#define VLPT_IDX_mskVLPTB ( 0x3FF << VLPT_IDX_offVLPTB )
+
+/******************************************************************************
+ * mr6: ILMB (Instruction Local Memory Base Register)
+ *****************************************************************************/
+#define ILMB_offIEN 0 /* Enable ILM */
+#define ILMB_offILMSZ 1 /* Size of ILM */
+/* bit 5:19 reserved */
+#define ILMB_offIBPA 20 /* Base PA of ILM */
+
+#define ILMB_mskIEN ( 0x1 << ILMB_offIEN )
+#define ILMB_mskILMSZ ( 0xF << ILMB_offILMSZ )
+#define ILMB_mskIBPA ( 0xFFF << ILMB_offIBPA )
+
+/******************************************************************************
+ * mr7: DLMB (Data Local Memory Base Register)
+ *****************************************************************************/
+#define DLMB_offDEN 0 /* Enable DLM */
+#define DLMB_offDLMSZ 1 /* Size of DLM */
+#define DLMB_offDBM 5 /* Enable Double-Buffer Mode for DLM */
+#define DLMB_offDBB 6 /* Double-buffer bank which can be accessed by the processor */
+/* bit 7:19 reserved */
+#define DLMB_offDBPA 20 /* Base PA of DLM */
+
+#define DLMB_mskDEN ( 0x1 << DLMB_offDEN )
+#define DLMB_mskDLMSZ ( 0xF << DLMB_offDLMSZ )
+#define DLMB_mskDBM ( 0x1 << DLMB_offDBM )
+#define DLMB_mskDBB ( 0x1 << DLMB_offDBB )
+#define DLMB_mskDBPA ( 0xFFF << DLMB_offDBPA )
+
+/******************************************************************************
+ * mr8: CACHE_CTL (Cache Control Register)
+ *****************************************************************************/
+#define CACHE_CTL_offIC_EN 0 /* Enable I-cache */
+#define CACHE_CTL_offDC_EN 1 /* Enable D-cache */
+#define CACHE_CTL_offICALCK 2 /* I-cache all-lock resolution scheme */
+#define CACHE_CTL_offDCALCK 3 /* D-cache all-lock resolution scheme */
+#define CACHE_CTL_offDCCWF 4 /* Enable D-cache Critical Word Forwarding */
+#define CACHE_CTL_offDCPMW 5 /* Enable D-cache concurrent miss and write-back processing */
+/* bit 6:31 reserved */
+
+#define CACHE_CTL_mskIC_EN ( 0x1 << CACHE_CTL_offIC_EN )
+#define CACHE_CTL_mskDC_EN ( 0x1 << CACHE_CTL_offDC_EN )
+#define CACHE_CTL_mskICALCK ( 0x1 << CACHE_CTL_offICALCK )
+#define CACHE_CTL_mskDCALCK ( 0x1 << CACHE_CTL_offDCALCK )
+#define CACHE_CTL_mskDCCWF ( 0x1 << CACHE_CTL_offDCCWF )
+#define CACHE_CTL_mskDCPMW ( 0x1 << CACHE_CTL_offDCPMW )
+
+/******************************************************************************
+ * mr9: HSMP_SADDR (High Speed Memory Port Starting Address)
+ *****************************************************************************/
+#define HSMP_SADDR_offEN 0 /* Enable control bit for the High Speed Memory port */
+/* bit 1:19 reserved */
+
+#define HSMP_SADDR_offRANGE 1 /* Denote the address range (only defined in HSMP v2 ) */
+#define HSMP_SADDR_offSADDR 20 /* Starting base PA of the High Speed Memory Port region */
+
+#define HSMP_SADDR_mskEN ( 0x1 << HSMP_SADDR_offEN )
+#define HSMP_SADDR_mskRANGE ( 0xFFF << HSMP_SADDR_offRANGE )
+#define HSMP_SADDR_mskSADDR ( 0xFFF << HSMP_SADDR_offSADDR )
+
+/******************************************************************************
+ * mr10: HSMP_EADDR (High Speed Memory Port Ending Address)
+ *****************************************************************************/
+/* bit 0:19 reserved */
+#define HSMP_EADDR_offEADDR 20
+
+#define HSMP_EADDR_mskEADDR ( 0xFFF << HSMP_EADDR_offEADDR )
+
+/******************************************************************************
+ * dr0+(n*5): BPCn (n=0-7) (Breakpoint Control Register)
+ *****************************************************************************/
+#define BPC_offWP 0 /* Configuration of BPAn */
+#define BPC_offEL 1 /* Enable BPAn */
+#define BPC_offS 2 /* Data address comparison for a store instruction */
+#define BPC_offP 3 /* Compared data address is PA */
+#define BPC_offC 4 /* CID value is compared with the BPCIDn register */
+#define BPC_offBE0 5 /* Enable byte mask for the comparison with register */
+#define BPC_offBE1 6 /* Enable byte mask for the comparison with register */
+#define BPC_offBE2 7 /* Enable byte mask for the comparison with register */
+#define BPC_offBE3 8 /* Enable byte mask for the comparison with register */
+#define BPC_offT 9 /* Enable breakpoint Embedded Tracer triggering operation */
+
+#define BPC_mskWP ( 0x1 << BPC_offWP )
+#define BPC_mskEL ( 0x1 << BPC_offEL )
+#define BPC_mskS ( 0x1 << BPC_offS )
+#define BPC_mskP ( 0x1 << BPC_offP )
+#define BPC_mskC ( 0x1 << BPC_offC )
+#define BPC_mskBE0 ( 0x1 << BPC_offBE0 )
+#define BPC_mskBE1 ( 0x1 << BPC_offBE1 )
+#define BPC_mskBE2 ( 0x1 << BPC_offBE2 )
+#define BPC_mskBE3 ( 0x1 << BPC_offBE3 )
+#define BPC_mskT ( 0x1 << BPC_offT )
+
+/******************************************************************************
+ * dr1+(n*5): BPAn (n=0-7) (Breakpoint Address Register)
+ *****************************************************************************/
+
+ /* These registers contain the breakpoint address */
+
+/******************************************************************************
+ * dr2+(n*5): BPAMn (n=0-7) (Breakpoint Address Mask Register)
+ *****************************************************************************/
+
+ /* These registers contain the address comparison mask for the BPAn register */
+
+/******************************************************************************
+ * dr3+(n*5): BPVn (n=0-7) Breakpoint Data Value Register
+ *****************************************************************************/
+
+ /* The BPVn register contains the data value that will be compared with the
+ * incoming load/store data value */
+
+/******************************************************************************
+ * dr4+(n*5): BPCIDn (n=0-7) (Breakpoint Context ID Register)
+ *****************************************************************************/
+#define BPCID_offCID 0 /* CID that will be compared with a process's CID */
+/* bit 9:31 reserved */
+
+#define BPCID_mskCID ( 0x1FF << BPCID_offCID )
+
+/******************************************************************************
+ * dr40: EDM_CFG (EDM Configuration Register)
+ *****************************************************************************/
+#define EDM_CFG_offBC 0 /* Number of hardware breakpoint sets implemented */
+#define EDM_CFG_offDIMU 3 /* Debug Instruction Memory Unit exists */
+/* bit 4:15 reserved */
+#define EDM_CFG_offVER 16 /* EDM version */
+
+#define EDM_CFG_mskBC ( 0x7 << EDM_CFG_offBC )
+#define EDM_CFG_mskDIMU ( 0x1 << EDM_CFG_offDIMU )
+#define EDM_CFG_mskVER ( 0xFFFF << EDM_CFG_offVER )
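+
+/*
+ * Decoding sketch (the usual N-1 encoding of the BC field is an assumption;
+ * check the EDM spec before relying on it):
+ *
+ *	nr_bp_sets = ((edm_cfg & EDM_CFG_mskBC) >> EDM_CFG_offBC) + 1;
+ *	edm_ver    = (edm_cfg & EDM_CFG_mskVER) >> EDM_CFG_offVER;
+ */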
+
+/******************************************************************************
+ * dr41: EDMSW (EDM Status Word)
+ *****************************************************************************/
+#define EDMSW_offWV 0 /* Write Valid */
+#define EDMSW_offRV 1 /* Read Valid */
+#define EDMSW_offDE 2 /* Debug exception has occurred for this core */
+/* bit 3:31 reserved */
+
+#define EDMSW_mskWV ( 0x1 << EDMSW_offWV )
+#define EDMSW_mskRV ( 0x1 << EDMSW_offRV )
+#define EDMSW_mskDE ( 0x1 << EDMSW_offDE )
+
+/******************************************************************************
+ * dr42: EDM_CTL (EDM Control Register)
+ *****************************************************************************/
+/* bit 0:5, 7:30 reserved */
+#define EDM_CTL_offV3_EDM_MODE 6 /* EDM compatibility control bit */
+#define EDM_CTL_offDEH_SEL 31 /* Controls where debug exception is directed to */
+
+#define EDM_CTL_mskV3_EDM_MODE ( 0x1 << EDM_CTL_offV3_EDM_MODE )
+#define EDM_CTL_mskDEH_SEL ( 0x1 << EDM_CTL_offDEH_SEL )
+
+/******************************************************************************
+ * dr43: EDM_DTR (EDM Data Transfer Register)
+ *****************************************************************************/
+
+ /* This is used to exchange data between the embedded EDM logic
+ * and the processor core */
+
+/******************************************************************************
+ * dr44: BPMTC (Breakpoint Match Trigger Counter Register)
+ *****************************************************************************/
+#define BPMTC_offBPMTC 0 /* Breakpoint match trigger counter value */
+/* bit 16:31 reserved */
+
+#define BPMTC_mskBPMTC ( 0xFFFF << BPMTC_offBPMTC )
+
+/******************************************************************************
+ * dr45: DIMBR (Debug Instruction Memory Base Register)
+ *****************************************************************************/
+/* bit 0:11 reserved */
+#define DIMBR_offDIMB 12 /* Base address of the Debug Instruction Memory (DIM) */
+#define DIMBR_mskDIMB ( 0xFFFFF << DIMBR_offDIMB )
+
+/******************************************************************************
+ * dr46: TECR0(Trigger Event Control register 0)
+ * dr47: TECR1 (Trigger Event Control register 1)
+ *****************************************************************************/
+#define TECR_offBP 0 /* Controls which BP is used as a trigger source */
+#define TECR_offNMI 8 /* Use NMI as a trigger source */
+#define TECR_offHWINT 9 /* Corresponding interrupt is used as a trigger source */
+#define TECR_offEVIC 15 /* Enable HWINT as a trigger source in EVIC mode */
+#define TECR_offSYS 16 /* Enable SYSCALL instruction as a trigger source */
+#define TECR_offDBG 17 /* Enable debug exception as a trigger source */
+#define TECR_offMRE 18 /* Enable MMU related exception as a trigger source */
+#define TECR_offE 19 /* An exception is used as a trigger source */
+/* bit 20:30 reserved */
+#define TECR_offL 31 /* Link/Cascade TECR0 trigger event to TECR1 trigger event */
+
+#define TECR_mskBP ( 0xFF << TECR_offBP )
+#define TECR_mskNMI ( 0x1 << TECR_offNMI )
+#define TECR_mskHWINT ( 0x3F << TECR_offHWINT )
+#define TECR_mskEVIC ( 0x1 << TECR_offEVIC )
+#define TECR_mskSYS ( 0x1 << TECR_offSYS )
+#define TECR_mskDBG ( 0x1 << TECR_offDBG )
+#define TECR_mskMRE ( 0x1 << TECR_offMRE )
+#define TECR_mskE ( 0x1 << TECR_offE )
+#define TECR_mskL ( 0x1 << TECR_offL )
+
+/******************************************************************************
+ * pfr0-2: PFMC0-2 (Performance Counter Register 0-2)
+ *****************************************************************************/
+
+ /* These registers contain the performance event counts */
+
+/******************************************************************************
+ * pfr3: PFM_CTL (Performance Counter Control Register)
+ *****************************************************************************/
+#define PFM_CTL_offEN0 0 /* Enable PFMC0 */
+#define PFM_CTL_offEN1 1 /* Enable PFMC1 */
+#define PFM_CTL_offEN2 2 /* Enable PFMC2 */
+#define PFM_CTL_offIE0 3 /* Enable interrupt for PFMC0 */
+#define PFM_CTL_offIE1 4 /* Enable interrupt for PFMC1 */
+#define PFM_CTL_offIE2 5 /* Enable interrupt for PFMC2 */
+#define PFM_CTL_offOVF0 6 /* Overflow bit of PFMC0 */
+#define PFM_CTL_offOVF1 7 /* Overflow bit of PFMC1 */
+#define PFM_CTL_offOVF2 8 /* Overflow bit of PFMC2 */
+#define PFM_CTL_offKS0 9 /* Enable superuser mode event counting for PFMC0 */
+#define PFM_CTL_offKS1 10 /* Enable superuser mode event counting for PFMC1 */
+#define PFM_CTL_offKS2 11 /* Enable superuser mode event counting for PFMC2 */
+#define PFM_CTL_offKU0 12 /* Enable user mode event counting for PFMC0 */
+#define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */
+#define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */
+#define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */
+#define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */
+#define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */
+/* bit 28:31 reserved */
+
+#define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 )
+#define PFM_CTL_mskEN1 ( 0x01 << PFM_CTL_offEN1 )
+#define PFM_CTL_mskEN2 ( 0x01 << PFM_CTL_offEN2 )
+#define PFM_CTL_mskIE0 ( 0x01 << PFM_CTL_offIE0 )
+#define PFM_CTL_mskIE1 ( 0x01 << PFM_CTL_offIE1 )
+#define PFM_CTL_mskIE2 ( 0x01 << PFM_CTL_offIE2 )
+#define PFM_CTL_mskOVF0 ( 0x01 << PFM_CTL_offOVF0 )
+#define PFM_CTL_mskOVF1 ( 0x01 << PFM_CTL_offOVF1 )
+#define PFM_CTL_mskOVF2 ( 0x01 << PFM_CTL_offOVF2 )
+#define PFM_CTL_mskKS0 ( 0x01 << PFM_CTL_offKS0 )
+#define PFM_CTL_mskKS1 ( 0x01 << PFM_CTL_offKS1 )
+#define PFM_CTL_mskKS2 ( 0x01 << PFM_CTL_offKS2 )
+#define PFM_CTL_mskKU0 ( 0x01 << PFM_CTL_offKU0 )
+#define PFM_CTL_mskKU1 ( 0x01 << PFM_CTL_offKU1 )
+#define PFM_CTL_mskKU2 ( 0x01 << PFM_CTL_offKU2 )
+#define PFM_CTL_mskSEL0 ( 0x01 << PFM_CTL_offSEL0 )
+#define PFM_CTL_mskSEL1 ( 0x3F << PFM_CTL_offSEL1 )
+#define PFM_CTL_mskSEL2 ( 0x3F << PFM_CTL_offSEL2 )
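+
+/*
+ * Programming sketch (illustrative): arm counter 0 with an overflow
+ * interrupt, counting in both superuser and user mode. The event number
+ * written into the SEL0 field is hypothetical.
+ *
+ *	ctl  = PFM_CTL_mskEN0 | PFM_CTL_mskIE0 | PFM_CTL_mskKS0 | PFM_CTL_mskKU0;
+ *	ctl |= (event << PFM_CTL_offSEL0) & PFM_CTL_mskSEL0;
+ */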
+
+/******************************************************************************
+ * SDZ_CTL (Structure Downsizing Control Register)
+ *****************************************************************************/
+#define SDZ_CTL_offICDZ 0 /* I-cache downsizing control */
+#define SDZ_CTL_offDCDZ 3 /* D-cache downsizing control */
+#define SDZ_CTL_offMTBDZ 6 /* MTLB downsizing control */
+#define SDZ_CTL_offBTBDZ 9 /* Branch Target Table downsizing control */
+/* bit 12:31 reserved */
+#define SDZ_CTL_mskICDZ ( 0x07 << SDZ_CTL_offICDZ )
+#define SDZ_CTL_mskDCDZ ( 0x07 << SDZ_CTL_offDCDZ )
+#define SDZ_CTL_mskMTBDZ ( 0x07 << SDZ_CTL_offMTBDZ )
+#define SDZ_CTL_mskBTBDZ ( 0x07 << SDZ_CTL_offBTBDZ )
+
+/******************************************************************************
+ * N13MISC_CTL (N13 Miscellaneous Control Register)
+ *****************************************************************************/
+#define N13MISC_CTL_offBTB 0 /* Disable Branch Target Buffer */
+#define N13MISC_CTL_offRTP 1 /* Disable Return Target Predictor */
+#define N13MISC_CTL_offPTEPF 2 /* Disable HPTWK L2 PTE prefetch */
+#define N13MISC_CTL_offSP_SHADOW_EN 4 /* Enable shadow stack pointers */
+/* bit 6, 9:31 reserved */
+
+#define N13MISC_CTL_mskBTB ( 0x1 << N13MISC_CTL_offBTB )
+#define N13MISC_CTL_mskRTP ( 0x1 << N13MISC_CTL_offRTP )
+#define N13MISC_CTL_mskPTEPF ( 0x1 << N13MISC_CTL_offPTEPF )
+#define N13MISC_CTL_mskSP_SHADOW_EN ( 0x1 << N13MISC_CTL_offSP_SHADOW_EN )
+
+#define MISC_init (N13MISC_CTL_mskBTB|N13MISC_CTL_mskRTP|N13MISC_CTL_mskSP_SHADOW_EN)
+
+/******************************************************************************
+ * PRUSR_ACC_CTL (Privileged Resource User Access Control Registers)
+ *****************************************************************************/
+#define PRUSR_ACC_CTL_offDMA_EN 0 /* Allow user mode access of DMA registers */
+#define PRUSR_ACC_CTL_offPFM_EN 1 /* Allow user mode access of PFM registers */
+
+#define PRUSR_ACC_CTL_mskDMA_EN ( 0x1 << PRUSR_ACC_CTL_offDMA_EN )
+#define PRUSR_ACC_CTL_mskPFM_EN ( 0x1 << PRUSR_ACC_CTL_offPFM_EN )
+
+/******************************************************************************
+ * dmar0: DMA_CFG (DMA Configuration Register)
+ *****************************************************************************/
+#define DMA_CFG_offNCHN 0 /* The number of DMA channels implemented */
+#define DMA_CFG_offUNEA 2 /* Un-aligned External Address transfer feature */
+#define DMA_CFG_off2DET 3 /* 2-D Element Transfer feature */
+/* bit 4:15 reserved */
+#define DMA_CFG_offVER 16 /* DMA architecture and implementation version */
+
+#define DMA_CFG_mskNCHN ( 0x3 << DMA_CFG_offNCHN )
+#define DMA_CFG_mskUNEA ( 0x1 << DMA_CFG_offUNEA )
+#define DMA_CFG_msk2DET ( 0x1 << DMA_CFG_off2DET )
+#define DMA_CFG_mskVER ( 0xFFFF << DMA_CFG_offVER )
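+
+/*
+ * Decoding sketch (the +1 convention for NCHN is an assumption; the GCSW
+ * register below only defines channels 0 and 1):
+ *
+ *	nr_channels = ((dma_cfg & DMA_CFG_mskNCHN) >> DMA_CFG_offNCHN) + 1;
+ */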
+
+/******************************************************************************
+ * dmar1: DMA_GCSW (DMA Global Control and Status Word Register)
+ *****************************************************************************/
+#define DMA_GCSW_offC0STAT 0 /* DMA channel 0 state */
+#define DMA_GCSW_offC1STAT 3 /* DMA channel 1 state */
+/* bit 6:11 reserved */
+#define DMA_GCSW_offC0INT 12 /* DMA channel 0 generate interrupt */
+#define DMA_GCSW_offC1INT 13 /* DMA channel 1 generate interrupt */
+/* bit 14:30 reserved */
+#define DMA_GCSW_offEN 31 /* Enable DMA engine */
+
+#define DMA_GCSW_mskC0STAT ( 0x7 << DMA_GCSW_offC0STAT )
+#define DMA_GCSW_mskC1STAT ( 0x7 << DMA_GCSW_offC1STAT )
+#define DMA_GCSW_mskC0INT ( 0x1 << DMA_GCSW_offC0INT )
+#define DMA_GCSW_mskC1INT ( 0x1 << DMA_GCSW_offC1INT )
+#define DMA_GCSW_mskEN ( 0x1 << DMA_GCSW_offEN )
+
+/******************************************************************************
+ * dmar2: DMA_CHNSEL (DMA Channel Selection Register)
+ *****************************************************************************/
+#define DMA_CHNSEL_offCHAN 0 /* Selected channel number */
+/* bit 2:31 reserved */
+
+#define DMA_CHNSEL_mskCHAN ( 0x3 << DMA_CHNSEL_offCHAN )
+
+/******************************************************************************
+ * dmar3: DMA_ACT (DMA Action Register)
+ *****************************************************************************/
+#define DMA_ACT_offACMD 0 /* DMA Action Command */
+/* bit 2:31 reserved */
+#define DMA_ACT_mskACMD ( 0x3 << DMA_ACT_offACMD )
+
+/******************************************************************************
+ * dmar4: DMA_SETUP (DMA Setup Register)
+ *****************************************************************************/
+#define DMA_SETUP_offLM 0 /* Local Memory Selection */
+#define DMA_SETUP_offTDIR 1 /* Transfer Direction */
+#define DMA_SETUP_offTES 2 /* Transfer Element Size */
+#define DMA_SETUP_offESTR 4 /* External memory transfer Stride */
+#define DMA_SETUP_offCIE 16 /* Interrupt Enable on Completion */
+#define DMA_SETUP_offSIE 17 /* Interrupt Enable on explicit Stop */
+#define DMA_SETUP_offEIE 18 /* Interrupt Enable on Error */
+#define DMA_SETUP_offUE 19 /* Enable the Un-aligned External Address */
+#define DMA_SETUP_off2DE 20 /* Enable the 2-D External Transfer */
+#define DMA_SETUP_offCOA 21 /* Transfer Coalescable */
+/* bit 22:31 reserved */
+
+#define DMA_SETUP_mskLM ( 0x1 << DMA_SETUP_offLM )
+#define DMA_SETUP_mskTDIR ( 0x1 << DMA_SETUP_offTDIR )
+#define DMA_SETUP_mskTES ( 0x3 << DMA_SETUP_offTES )
+#define DMA_SETUP_mskESTR ( 0xFFF << DMA_SETUP_offESTR )
+#define DMA_SETUP_mskCIE ( 0x1 << DMA_SETUP_offCIE )
+#define DMA_SETUP_mskSIE ( 0x1 << DMA_SETUP_offSIE )
+#define DMA_SETUP_mskEIE ( 0x1 << DMA_SETUP_offEIE )
+#define DMA_SETUP_mskUE ( 0x1 << DMA_SETUP_offUE )
+#define DMA_SETUP_msk2DE ( 0x1 << DMA_SETUP_off2DE )
+#define DMA_SETUP_mskCOA ( 0x1 << DMA_SETUP_offCOA )
+
+/******************************************************************************
+ * dmar5: DMA_ISADDR (DMA Internal Start Address Register)
+ *****************************************************************************/
+#define DMA_ISADDR_offISADDR 0 /* Internal Start Address */
+/* bit 20:31 reserved */
+#define DMA_ISADDR_mskISADDR ( 0xFFFFF << DMA_ISADDR_offISADDR )
+
+/******************************************************************************
+ * dmar6: DMA_ESADDR (DMA External Start Address Register)
+ *****************************************************************************/
+/* This register holds the External Start Address */
+
+/******************************************************************************
+ * dmar7: DMA_TCNT (DMA Transfer Element Count Register)
+ *****************************************************************************/
+#define DMA_TCNT_offTCNT 0 /* DMA transfer element count */
+/* bit 18:31 reserved */
+#define DMA_TCNT_mskTCNT ( 0x3FFFF << DMA_TCNT_offTCNT )
+
+/******************************************************************************
+ * dmar8: DMA_STATUS (DMA Status Register)
+ *****************************************************************************/
+#define DMA_STATUS_offSTAT 0 /* DMA channel state */
+#define DMA_STATUS_offSTUNA 3 /* Un-aligned error on External Stride value */
+#define DMA_STATUS_offDERR 4 /* DMA Transfer Disruption Error */
+#define DMA_STATUS_offEUNA 5 /* Un-aligned error on the External address */
+#define DMA_STATUS_offIUNA 6 /* Un-aligned error on the Internal address */
+#define DMA_STATUS_offIOOR 7 /* Out-Of-Range error on the Internal address */
+#define DMA_STATUS_offEBUS 8 /* Bus Error on an External DMA transfer */
+#define DMA_STATUS_offESUP 9 /* DMA setup error */
+/* bit 10:31 reserved */
+
+#define DMA_STATUS_mskSTAT ( 0x7 << DMA_STATUS_offSTAT )
+#define DMA_STATUS_mskSTUNA ( 0x1 << DMA_STATUS_offSTUNA )
+#define DMA_STATUS_mskDERR ( 0x1 << DMA_STATUS_offDERR )
+#define DMA_STATUS_mskEUNA ( 0x1 << DMA_STATUS_offEUNA )
+#define DMA_STATUS_mskIUNA ( 0x1 << DMA_STATUS_offIUNA )
+#define DMA_STATUS_mskIOOR ( 0x1 << DMA_STATUS_offIOOR )
+#define DMA_STATUS_mskEBUS ( 0x1 << DMA_STATUS_offEBUS )
+#define DMA_STATUS_mskESUP ( 0x1 << DMA_STATUS_offESUP )
+
+/******************************************************************************
+ * dmar9: DMA_2DSET (DMA 2D Setup Register)
+ *****************************************************************************/
+#define DMA_2DSET_offWECNT 0 /* The Width Element Count for a 2-D region */
+#define DMA_2DSET_offHTSTR 16 /* The Height Stride for a 2-D region */
+
+#define DMA_2DSET_mskHTSTR ( 0xFFFF << DMA_2DSET_offHTSTR )
+#define DMA_2DSET_mskWECNT ( 0xFFFF << DMA_2DSET_offWECNT )
+
+/******************************************************************************
+ * dmar10: DMA_2DSCTL (DMA 2D Startup Control Register)
+ *****************************************************************************/
+#define DMA_2DSCTL_offSTWECNT 0 /* Startup Width Element Count for a 2-D region */
+/* bit 16:31 reserved */
+
+#define DMA_2DSCTL_mskSTWECNT ( 0xFFFF << DMA_2DSCTL_offSTWECNT )
+
+/******************************************************************************
+ * fpcsr: FPCSR (Floating-Point Control Status Register)
+ *****************************************************************************/
+#define FPCSR_offRM 0
+#define FPCSR_offIVO 2
+#define FPCSR_offDBZ 3
+#define FPCSR_offOVF 4
+#define FPCSR_offUDF 5
+#define FPCSR_offIEX 6
+#define FPCSR_offIVOE 7
+#define FPCSR_offDBZE 8
+#define FPCSR_offOVFE 9
+#define FPCSR_offUDFE 10
+#define FPCSR_offIEXE 11
+#define FPCSR_offDNZ 12
+#define FPCSR_offIVOT 13
+#define FPCSR_offDBZT 14
+#define FPCSR_offOVFT 15
+#define FPCSR_offUDFT 16
+#define FPCSR_offIEXT 17
+#define FPCSR_offDNIT 18
+#define FPCSR_offRIT 19
+
+#define FPCSR_mskRM ( 0x3 << FPCSR_offRM )
+#define FPCSR_mskIVO ( 0x1 << FPCSR_offIVO )
+#define FPCSR_mskDBZ ( 0x1 << FPCSR_offDBZ )
+#define FPCSR_mskOVF ( 0x1 << FPCSR_offOVF )
+#define FPCSR_mskUDF ( 0x1 << FPCSR_offUDF )
+#define FPCSR_mskIEX ( 0x1 << FPCSR_offIEX )
+#define FPCSR_mskIVOE ( 0x1 << FPCSR_offIVOE )
+#define FPCSR_mskDBZE ( 0x1 << FPCSR_offDBZE )
+#define FPCSR_mskOVFE ( 0x1 << FPCSR_offOVFE )
+#define FPCSR_mskUDFE ( 0x1 << FPCSR_offUDFE )
+#define FPCSR_mskIEXE ( 0x1 << FPCSR_offIEXE )
+#define FPCSR_mskDNZ ( 0x1 << FPCSR_offDNZ )
+#define FPCSR_mskIVOT ( 0x1 << FPCSR_offIVOT )
+#define FPCSR_mskDBZT ( 0x1 << FPCSR_offDBZT )
+#define FPCSR_mskOVFT ( 0x1 << FPCSR_offOVFT )
+#define FPCSR_mskUDFT ( 0x1 << FPCSR_offUDFT )
+#define FPCSR_mskIEXT ( 0x1 << FPCSR_offIEXT )
+#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT )
+#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT )
+#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX)
+#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE)
+#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT)
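+
+/*
+ * Usage sketch: an FPU exception handler would typically inspect and then
+ * clear the sticky flag bits in one go, e.g.
+ *
+ *	if (fpcsr & FPCSR_mskALL)	// any of IVO/DBZ/OVF/UDF/IEX pending
+ *		fpcsr &= ~FPCSR_mskALL;	// acknowledge all of them
+ */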
+
+/******************************************************************************
+ * fpcfg: FPCFG (Floating-Point Configuration Register)
+ *****************************************************************************/
+#define FPCFG_offSP 0
+#define FPCFG_offDP 1
+#define FPCFG_offFREG 2
+#define FPCFG_offFMA 4
+#define FPCFG_offIMVER 22
+#define FPCFG_offAVER 27
+
+#define FPCFG_mskSP ( 0x1 << FPCFG_offSP )
+#define FPCFG_mskDP ( 0x1 << FPCFG_offDP )
+#define FPCFG_mskFREG ( 0x3 << FPCFG_offFREG )
+#define FPCFG_mskFMA ( 0x1 << FPCFG_offFMA )
+#define FPCFG_mskIMVER ( 0x1F << FPCFG_offIMVER )
+#define FPCFG_mskAVER ( 0x1F << FPCFG_offAVER )
+
+/******************************************************************************
+ * fucpr: FUCOP_CTL (FPU and Coprocessor Enable Control Register)
+ *****************************************************************************/
+#define FUCOP_CTL_offCP0EN 0
+#define FUCOP_CTL_offCP1EN 1
+#define FUCOP_CTL_offCP2EN 2
+#define FUCOP_CTL_offCP3EN 3
+#define FUCOP_CTL_offAUEN 31
+
+#define FUCOP_CTL_mskCP0EN ( 0x1 << FUCOP_CTL_offCP0EN )
+#define FUCOP_CTL_mskCP1EN ( 0x1 << FUCOP_CTL_offCP1EN )
+#define FUCOP_CTL_mskCP2EN ( 0x1 << FUCOP_CTL_offCP2EN )
+#define FUCOP_CTL_mskCP3EN ( 0x1 << FUCOP_CTL_offCP3EN )
+#define FUCOP_CTL_mskAUEN ( 0x1 << FUCOP_CTL_offAUEN )
+
+#endif /* __NDS32_BITFIELD_H__ */
diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h
new file mode 100644
index 000000000000..347db4881c5f
--- /dev/null
+++ b/arch/nds32/include/asm/cache.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_CACHE_H__
+#define __NDS32_CACHE_H__
+
+#define L1_CACHE_BYTES 32
+#define L1_CACHE_SHIFT 5
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif /* __NDS32_CACHE_H__ */
diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h
new file mode 100644
index 000000000000..38ec458ba543
--- /dev/null
+++ b/arch/nds32/include/asm/cache_info.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+struct cache_info {
+ unsigned char ways;
+ unsigned char line_size;
+ unsigned short sets;
+ unsigned short size;
+#if defined(CONFIG_CPU_CACHE_ALIASING)
+ unsigned short aliasing_num;
+ unsigned int aliasing_mask;
+#endif
+};
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
new file mode 100644
index 000000000000..7b9b20a381cb
--- /dev/null
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_CACHEFLUSH_H__
+#define __NDS32_CACHEFLUSH_H__
+
+#include <linux/mm.h>
+
+#define PG_dcache_dirty PG_arch_1
+
+#ifdef CONFIG_CPU_CACHE_ALIASING
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_dup_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn);
+void flush_cache_kmaps(void);
+void flush_cache_vmap(unsigned long start, unsigned long end);
+void flush_cache_vunmap(unsigned long start, unsigned long end);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+void flush_dcache_page(struct page *page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len);
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr);
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page(struct page *page);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_dcache_mmap_lock(mapping) spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) spin_unlock_irq(&(mapping)->tree_lock)
+
+#else
+#include <asm-generic/cacheflush.h>
+#endif
+
+#endif /* __NDS32_CACHEFLUSH_H__ */
diff --git a/arch/nds32/include/asm/current.h b/arch/nds32/include/asm/current.h
new file mode 100644
index 000000000000..b4dcd22b7bcb
--- /dev/null
+++ b/arch/nds32/include/asm/current.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASM_NDS32_CURRENT_H
+#define _ASM_NDS32_CURRENT_H
+
+#ifndef __ASSEMBLY__
+register struct task_struct *current asm("$r25");
+#endif /* __ASSEMBLY__ */
+#define tsk $r25
+
+#endif /* _ASM_NDS32_CURRENT_H */
diff --git a/arch/nds32/include/asm/delay.h b/arch/nds32/include/asm/delay.h
new file mode 100644
index 000000000000..519ba97acb6e
--- /dev/null
+++ b/arch/nds32/include/asm/delay.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_DELAY_H__
+#define __NDS32_DELAY_H__
+
+#include <asm/param.h>
+
+/* There is no clocksource cycle counter in the CPU. */
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(".align 2\n"
+ "1:\n"
+ "\taddi\t%0, %0, -1\n"
+ "\tbgtz\t%0, 1b\n"
+ :"=r"(loops)
+ :"0"(loops));
+}
+
+static inline void __udelay(unsigned long usecs, unsigned long lpj)
+{
+ usecs *= (unsigned long)(((0x8000000000000000ULL / (500000 / HZ)) +
+ 0x80000000ULL) >> 32);
+ usecs = (unsigned long)(((unsigned long long)usecs * lpj) >> 32);
+ __delay(usecs);
+}
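+
+/*
+ * The constant above is a 32.32 fixed-point multiplier of roughly
+ * 2^32 * HZ / 10^6, so loops == usecs * lpj * HZ / 10^6 after the final
+ * right shift. Worked example (assuming HZ == 100): the multiplier is
+ * 429497, and udelay(10) with loops_per_jiffy == 1000000 yields
+ * (10 * 429497 * 1000000) >> 32 ~= 1000 loops, i.e. 10us at 10^8 loops/s.
+ */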
+
+#define udelay(usecs) __udelay((usecs), loops_per_jiffy)
+
+/* make sure "usecs *= ..." in udelay do not overflow. */
+#if HZ >= 1000
+#define MAX_UDELAY_MS 1
+#elif HZ <= 200
+#define MAX_UDELAY_MS 5
+#else
+#define MAX_UDELAY_MS (1000 / HZ)
+#endif
+
+#endif
diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..2dd47d245c25
--- /dev/null
+++ b/arch/nds32/include/asm/dma-mapping.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef ASMNDS32_DMA_MAPPING_H
+#define ASMNDS32_DMA_MAPPING_H
+
+extern struct dma_map_ops nds32_dma_ops;
+
+static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &nds32_dma_ops;
+}
+
+#endif
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
new file mode 100644
index 000000000000..56c479058802
--- /dev/null
+++ b/arch/nds32/include/asm/elf.h
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASMNDS32_ELF_H
+#define __ASMNDS32_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/ptrace.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_freg_t[3];
+
+extern unsigned int elf_hwcap;
+
+#define EM_NDS32 167
+
+#define R_NDS32_NONE 0
+#define R_NDS32_16_RELA 19
+#define R_NDS32_32_RELA 20
+#define R_NDS32_9_PCREL_RELA 22
+#define R_NDS32_15_PCREL_RELA 23
+#define R_NDS32_17_PCREL_RELA 24
+#define R_NDS32_25_PCREL_RELA 25
+#define R_NDS32_HI20_RELA 26
+#define R_NDS32_LO12S3_RELA 27
+#define R_NDS32_LO12S2_RELA 28
+#define R_NDS32_LO12S1_RELA 29
+#define R_NDS32_LO12S0_RELA 30
+#define R_NDS32_SDA15S3_RELA 31
+#define R_NDS32_SDA15S2_RELA 32
+#define R_NDS32_SDA15S1_RELA 33
+#define R_NDS32_SDA15S0_RELA 34
+#define R_NDS32_GOT20 37
+#define R_NDS32_25_PLTREL 38
+#define R_NDS32_COPY 39
+#define R_NDS32_GLOB_DAT 40
+#define R_NDS32_JMP_SLOT 41
+#define R_NDS32_RELATIVE 42
+#define R_NDS32_GOTOFF 43
+#define R_NDS32_GOTPC20 44
+#define R_NDS32_GOT_HI20 45
+#define R_NDS32_GOT_LO12 46
+#define R_NDS32_GOTPC_HI20 47
+#define R_NDS32_GOTPC_LO12 48
+#define R_NDS32_GOTOFF_HI20 49
+#define R_NDS32_GOTOFF_LO12 50
+#define R_NDS32_INSN16 51
+#define R_NDS32_LABEL 52
+#define R_NDS32_LONGCALL1 53
+#define R_NDS32_LONGCALL2 54
+#define R_NDS32_LONGCALL3 55
+#define R_NDS32_LONGJUMP1 56
+#define R_NDS32_LONGJUMP2 57
+#define R_NDS32_LONGJUMP3 58
+#define R_NDS32_LOADSTORE 59
+#define R_NDS32_9_FIXED_RELA 60
+#define R_NDS32_15_FIXED_RELA 61
+#define R_NDS32_17_FIXED_RELA 62
+#define R_NDS32_25_FIXED_RELA 63
+#define R_NDS32_PLTREL_HI20 64
+#define R_NDS32_PLTREL_LO12 65
+#define R_NDS32_PLT_GOTREL_HI20 66
+#define R_NDS32_PLT_GOTREL_LO12 67
+#define R_NDS32_LO12S0_ORI_RELA 72
+#define R_NDS32_DWARF2_OP1_RELA 77
+#define R_NDS32_DWARF2_OP2_RELA 78
+#define R_NDS32_DWARF2_LEB_RELA 79
+#define R_NDS32_WORD_9_PCREL_RELA 94
+#define R_NDS32_LONGCALL4 107
+#define R_NDS32_RELA_NOP_MIX 192
+#define R_NDS32_RELA_NOP_MAX 255
+
+#define ELF_NGREG (sizeof (struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+ *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user (under
+ linux we use the 'trad-core' bfd). There are quite a number of
+ obstacles to being able to view the contents of the floating point
+ registers, and until these are solved you will not be able to view the
+ contents of them. Actually, you can read in the core file and look at
+ the contents of the user struct to find out what the floating point
+ registers contain.
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (esp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integer number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+struct user_fp {
+ unsigned long long fd_regs[32];
+ unsigned long fpcsr;
+};
+
+typedef struct user_fp elf_fpregset_t;
+
+struct elf32_hdr;
+#define elf_check_arch(x) ((x)->e_machine == EM_NDS32)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#ifdef __NDS32_EB__
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+#define ELF_ARCH EM_NDS32
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ have no such handler. */
+#define ELF_PLAT_INIT(_r, load_addr) (_r)->uregs[0] = 0
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+
+#define ELF_HWCAP (elf_hwcap)
+
+#ifdef __KERNEL__
+
+#define ELF_PLATFORM (NULL)
+
+/* Old NetWinder binaries were compiled in such a way that the iBCS
+ heuristic always trips on them. Until these binaries become uncommon
+ enough not to care, don't trust the `ibcs' flag here. In any case
+ there is no other ELF system currently supported by iBCS.
+ @@ Could print a warning message to encourage users to upgrade. */
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif
+
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
+#endif
diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h
new file mode 100644
index 000000000000..0e60e153a71a
--- /dev/null
+++ b/arch/nds32/include/asm/fixmap.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_FIXMAP_H
+#define __ASM_NDS32_FIXMAP_H
+
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+enum fixed_addresses {
+ FIX_HOLE,
+ FIX_KMAP_RESERVED,
+ FIX_KMAP_BEGIN,
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS),
+#endif
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_fixed_addresses
+};
+#define FIXADDR_TOP ((unsigned long) (-(16 * PAGE_SIZE)))
+#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXMAP_PAGE_IO __pgprot(PAGE_DEVICE)
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
+#endif /* __ASM_NDS32_FIXMAP_H */
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h
new file mode 100644
index 000000000000..eab5e84bd991
--- /dev/null
+++ b/arch/nds32/include/asm/futex.h
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_FUTEX_H__
+#define __NDS32_FUTEX_H__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_ex_table(err_reg) \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f\n" \
+ " .long 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ "4: move %0, " err_reg "\n" \
+ " j 3b\n" \
+ " .popsection"
+
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+ smp_mb(); \
+ asm volatile( \
+ " movi $ta, #0\n" \
+ "1: llw %1, [%2+$ta]\n" \
+ " " insn "\n" \
+ "2: scw %0, [%2+$ta]\n" \
+ " beqz %0, 1b\n" \
+ " movi %0, #0\n" \
+ "3:\n" \
+ __futex_atomic_ex_table("%4") \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+ : "cc", "memory")
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 * uval, u32 __user * uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val, tmp, flags;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ smp_mb();
+ asm volatile (" movi $ta, #0\n"
+ "1: llw %1, [%6 + $ta]\n"
+ " sub %3, %1, %4\n"
+ " cmovz %2, %5, %3\n"
+ " cmovn %2, %1, %3\n"
+ "2: scw %2, [%6 + $ta]\n"
+ " beqz %2, 1b\n"
+ "3:\n " __futex_atomic_ex_table("%7")
+ :"+&r"(ret), "=&r"(val), "=&r"(tmp), "=&r"(flags)
+ :"r"(oldval), "r"(newval), "r"(uaddr), "i"(-EFAULT)
+ :"$ta", "memory");
+ smp_mb();
+
+ *uval = val;
+ return ret;
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ pagefault_disable();
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("move %0, %3", ret, oldval, tmp, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, tmp, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, tmp, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %0, %1, %3", ret, oldval, tmp, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, tmp, uaddr,
+ oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+#endif /* __NDS32_FUTEX_H__ */
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
new file mode 100644
index 000000000000..425d546cb059
--- /dev/null
+++ b/arch/nds32/include/asm/highmem.h
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/proc-fns.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily; subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * Ordering is (from lower to higher memory addresses):
+ *
+ * high_memory
+ * Persistent kmap area
+ * PKMAP_BASE
+ * fixed_addresses
+ * FIXADDR_START
+ * FIXADDR_TOP
+ * Vmalloc area
+ * VMALLOC_START
+ * VMALLOC_END
+ */
+#define PKMAP_BASE ((FIXADDR_START - PGDIR_SIZE) & (PGDIR_MASK))
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - (PKMAP_BASE)) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define kmap_prot PAGE_KERNEL
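+
+/*
+ * Note: PKMAP_NR()/PKMAP_ADDR() are inverses over the pkmap window, i.e.
+ * PKMAP_ADDR(PKMAP_NR(virt)) == (virt & PAGE_MASK) for any virt inside
+ * [PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE).
+ */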
+
+static inline void flush_cache_kmaps(void)
+{
+ cpu_dcache_wbinval_all();
+}
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
+#endif
+
+#endif
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
new file mode 100644
index 000000000000..966e71b3c960
--- /dev/null
+++ b/arch/nds32/include/asm/io.h
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_IO_H
+#define __ASM_NDS32_IO_H
+
+extern void iounmap(volatile void __iomem *addr);
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sbi %0, [%1]" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("shi %0, [%1]" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("swi %0, [%1]" : : "r" (val), "r" (addr));
+}
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lbi %0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lhi %0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lwi %0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __iormb() rmb()
+#define __iowmb() wmb()
+
+#define mmiowb() __asm__ __volatile__ ("msync all" : : : "memory")
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+
+#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
+#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
+#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
+#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
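+
+/*
+ * Usage sketch (device register offsets are illustrative only): writel() is
+ * ordered by the wmb() in __iowmb(), so a descriptor filled in normal memory
+ * is globally visible before the doorbell store that publishes it:
+ *
+ *	desc->status = DESC_READY;
+ *	writel(desc_dma_addr, base + REG_DOORBELL);
+ *	pending = readl(base + REG_IRQ_STATUS);	// rmb() after the load
+ */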
+#include <asm-generic/io.h>
+#endif /* __ASM_NDS32_IO_H */
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
new file mode 100644
index 000000000000..2bfd00f8bc48
--- /dev/null
+++ b/arch/nds32/include/asm/irqflags.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <asm/nds32.h>
+#include <nds32_intrinsic.h>
+
+#define arch_local_irq_disable() \
+ GIE_DISABLE()
+
+#define arch_local_irq_enable() \
+ GIE_ENABLE()
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE;
+ GIE_DISABLE();
+ return flags;
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ flags = __nds32__mfsr(NDS32_SR_PSW) & PSW_mskGIE;
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ if (flags)
+ GIE_ENABLE();
+}
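+
+/*
+ * Usage sketch: only the PSW.GIE bit is saved, so restore simply re-enables
+ * interrupts when they were enabled at save time.
+ *
+ *	unsigned long flags = arch_local_irq_save();
+ *	... critical section ...
+ *	arch_local_irq_restore(flags);
+ */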
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !flags;
+}
diff --git a/arch/nds32/include/asm/l2_cache.h b/arch/nds32/include/asm/l2_cache.h
new file mode 100644
index 000000000000..37dd5ef61de8
--- /dev/null
+++ b/arch/nds32/include/asm/l2_cache.h
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef L2_CACHE_H
+#define L2_CACHE_H
+
+/* CCTL_CMD_OP */
+#define L2_CA_CONF_OFF 0x0
+#define L2_IF_CONF_OFF 0x4
+#define L2CC_SETUP_OFF 0x8
+#define L2CC_PROT_OFF 0xC
+#define L2CC_CTRL_OFF 0x10
+#define L2_INT_EN_OFF 0x20
+#define L2_STA_OFF 0x24
+#define RDERR_ADDR_OFF 0x28
+#define WRERR_ADDR_OFF 0x2c
+#define EVDPTERR_ADDR_OFF 0x30
+#define IMPL3ERR_ADDR_OFF 0x34
+#define L2_CNT0_CTRL_OFF 0x40
+#define L2_EVNT_CNT0_OFF 0x44
+#define L2_CNT1_CTRL_OFF 0x48
+#define L2_EVNT_CNT1_OFF 0x4c
+#define L2_CCTL_CMD_OFF 0x60
+#define L2_CCTL_STATUS_OFF 0x64
+#define L2_LINE_TAG_OFF 0x68
+#define L2_LINE_DPT_OFF 0x70
+
+#define CCTL_CMD_L2_IX_INVAL 0x0
+#define CCTL_CMD_L2_PA_INVAL 0x1
+#define CCTL_CMD_L2_IX_WB 0x2
+#define CCTL_CMD_L2_PA_WB 0x3
+#define CCTL_CMD_L2_PA_WBINVAL 0x5
+#define CCTL_CMD_L2_SYNC 0xa
+
+/* CCTL_CMD_TYPE */
+#define CCTL_SINGLE_CMD 0
+#define CCTL_BLOCK_CMD 0x10
+#define CCTL_ALL_CMD 0x10
+
+/******************************************************************************
+ * L2_CA_CONF (Cache architecture configuration)
+ *****************************************************************************/
+#define L2_CA_CONF_offL2SET 0
+#define L2_CA_CONF_offL2WAY 4
+#define L2_CA_CONF_offL2CLSZ 8
+#define L2_CA_CONF_offL2DW 11
+#define L2_CA_CONF_offL2PT 14
+#define L2_CA_CONF_offL2VER 16
+
+#define L2_CA_CONF_mskL2SET (0xFUL << L2_CA_CONF_offL2SET)
+#define L2_CA_CONF_mskL2WAY (0xFUL << L2_CA_CONF_offL2WAY)
+#define L2_CA_CONF_mskL2CLSZ (0x7UL << L2_CA_CONF_offL2CLSZ)
+#define L2_CA_CONF_mskL2DW (0x7UL << L2_CA_CONF_offL2DW)
+#define L2_CA_CONF_mskL2PT (0x3UL << L2_CA_CONF_offL2PT)
+#define L2_CA_CONF_mskL2VER (0xFFFFUL << L2_CA_CONF_offL2VER)
+
+/******************************************************************************
+ * L2CC_SETUP (L2CC Setup register)
+ *****************************************************************************/
+#define L2CC_SETUP_offPART 0
+#define L2CC_SETUP_mskPART (0x3UL << L2CC_SETUP_offPART)
+#define L2CC_SETUP_offDDLATC 4
+#define L2CC_SETUP_mskDDLATC (0x3UL << L2CC_SETUP_offDDLATC)
+#define L2CC_SETUP_offTDLATC 8
+#define L2CC_SETUP_mskTDLATC (0x3UL << L2CC_SETUP_offTDLATC)
+
+/******************************************************************************
+ * L2CC_PROT (L2CC Protect register)
+ *****************************************************************************/
+#define L2CC_PROT_offMRWEN 31
+#define L2CC_PROT_mskMRWEN (0x1UL << L2CC_PROT_offMRWEN)
+
+/******************************************************************************
+ * L2CC_CTRL (L2CC Control register)
+ *****************************************************************************/
+#define L2CC_CTRL_offEN 31
+#define L2CC_CTRL_mskEN (0x1UL << L2CC_CTRL_offEN)
+
+/******************************************************************************
+ * L2_CCTL_STATUS_Mn (The L2CCTL command working status for Master n)
+ *****************************************************************************/
+#define L2_CCTL_STATUS_offCMD_COMP 31
+#define L2_CCTL_STATUS_mskCMD_COMP (0x1 << L2_CCTL_STATUS_offCMD_COMP)
+
+extern void __iomem *atl2c_base;
+#include <linux/smp.h>
+#include <asm/io.h>
+#include <asm/bitfield.h>
+
+#define L2C_R_REG(offset) readl(atl2c_base + offset)
+#define L2C_W_REG(offset, value) writel(value, atl2c_base + offset)
+
+#define L2_CMD_RDY() \
+ do { } while ((L2C_R_REG(L2_CCTL_STATUS_OFF) & L2_CCTL_STATUS_mskCMD_COMP) == 0)
+
+static inline unsigned long L2_CACHE_SET(void)
+{
+ return 64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >>
+ L2_CA_CONF_offL2SET);
+}
+
+static inline unsigned long L2_CACHE_WAY(void)
+{
+ return 1 +
+ ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >>
+ L2_CA_CONF_offL2WAY);
+}
+
+static inline unsigned long L2_CACHE_LINE_SIZE(void)
+{
+ return 4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >>
+ L2_CA_CONF_offL2CLSZ);
+}
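+
+/*
+ * Derived helper (illustrative, not required elsewhere in this port): the
+ * total L2 size follows directly from the three fields decoded above.
+ */
+static inline unsigned long L2_CACHE_SIZE(void)
+{
+	return L2_CACHE_SET() * L2_CACHE_WAY() * L2_CACHE_LINE_SIZE();
+}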
+
+static inline unsigned long GET_L2CC_CTRL_CPU(unsigned long cpu)
+{
+ if (cpu == smp_processor_id())
+ return L2C_R_REG(L2CC_CTRL_OFF);
+ return L2C_R_REG(L2CC_CTRL_OFF + (cpu << 8));
+}
+
+static inline void SET_L2CC_CTRL_CPU(unsigned long cpu, unsigned long val)
+{
+ if (cpu == smp_processor_id())
+ L2C_W_REG(L2CC_CTRL_OFF, val);
+ else
+ L2C_W_REG(L2CC_CTRL_OFF + (cpu << 8), val);
+}
+
+static inline unsigned long GET_L2CC_STATUS_CPU(unsigned long cpu)
+{
+ if (cpu == smp_processor_id())
+ return L2C_R_REG(L2_CCTL_STATUS_OFF);
+ return L2C_R_REG(L2_CCTL_STATUS_OFF + (cpu << 8));
+}
+#endif
diff --git a/arch/nds32/include/asm/linkage.h b/arch/nds32/include/asm/linkage.h
new file mode 100644
index 000000000000..e708c8bdb926
--- /dev/null
+++ b/arch/nds32/include/asm/linkage.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+/* This file is required by include/linux/linkage.h */
+#define __ALIGN .align 2
+#define __ALIGN_STR ".align 2"
+
+#endif
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
new file mode 100644
index 000000000000..60efc726b56e
--- /dev/null
+++ b/arch/nds32/include/asm/memory.h
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_MEMORY_H
+#define __ASM_NDS32_MEMORY_H
+
+#include <linux/compiler.h>
+#include <linux/sizes.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/page.h>
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET (0x0)
+#endif
+
+#ifndef __virt_to_bus
+#define __virt_to_bus __virt_to_phys
+#endif
+
+#ifndef __bus_to_virt
+#define __bus_to_virt __phys_to_virt
+#endif
+
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#define TASK_SIZE ((CONFIG_PAGE_OFFSET) - (SZ_32M))
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_32M)
+#define PAGE_OFFSET (CONFIG_PAGE_OFFSET)
+
+/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULES_END (PAGE_OFFSET)
+#define MODULES_VADDR (MODULES_END - SZ_32M)
+
+#if TASK_SIZE > MODULES_VADDR
+#error Top of user space clashes with start of module space
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
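+
+/*
+ * Example (assuming the common CONFIG_PAGE_OFFSET of 0xC0000000 and the
+ * default PHYS_OFFSET of 0): __pa(0xC0100000) == 0x00100000 and
+ * __va(0x00100000) == (void *)0xC0100000. The conversion is a fixed
+ * offset and is only valid for the kernel's direct-mapped lowmem.
+ */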
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * pfn_valid(pfn) indicates whether a PFN number is valid
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#ifndef CONFIG_DISCONTIGMEM
+
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
+
+#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+
+#else /* CONFIG_DISCONTIGMEM */
+#error CONFIG_DISCONTIGMEM is not supported yet.
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#endif
+
+#include <asm-generic/memory_model.h>
+
+#endif
diff --git a/arch/nds32/include/asm/mmu.h b/arch/nds32/include/asm/mmu.h
new file mode 100644
index 000000000000..88b9ee8c1064
--- /dev/null
+++ b/arch/nds32/include/asm/mmu.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_MMU_H
+#define __NDS32_MMU_H
+
+typedef struct {
+ unsigned int id;
+ void *vdso;
+} mm_context_t;
+
+#endif
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h
new file mode 100644
index 000000000000..fd7d13cefccc
--- /dev/null
+++ b/arch/nds32/include/asm/mmu_context.h
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_MMU_CONTEXT_H
+#define __ASM_NDS32_MMU_CONTEXT_H
+
+#include <linux/spinlock.h>
+#include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
+
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.id = 0;
+ return 0;
+}
+
+#define destroy_context(mm) do { } while(0)
+
+#define CID_BITS 9
+extern spinlock_t cid_lock;
+extern unsigned int cpu_last_cid;
+
+static inline void __new_context(struct mm_struct *mm)
+{
+ unsigned int cid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cid_lock, flags);
+ cid = cpu_last_cid;
+ cpu_last_cid += 1 << TLB_MISC_offCID;
+ if (cpu_last_cid == 0)
+ cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS;
+
+ if ((cid & TLB_MISC_mskCID) == 0)
+ flush_tlb_all();
+ spin_unlock_irqrestore(&cid_lock, flags);
+
+ mm->context.id = cid;
+}
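+
+/*
+ * Worked example (assuming TLB_MISC_offCID == 0): CIDs are handed out
+ * sequentially in the low CID_BITS bits; the bits above them act as a
+ * generation count. When the CID field wraps to zero, flush_tlb_all()
+ * purges stale translations, and check_context() below detects an mm
+ * from an old generation and assigns it a fresh CID.
+ */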
+
+static inline void check_context(struct mm_struct *mm)
+{
+ if (unlikely
+ ((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
+ __new_context(mm);
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+ check_context(next);
+ cpu_switch_mm(next);
+ }
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+
+#endif
diff --git a/arch/nds32/include/asm/module.h b/arch/nds32/include/asm/module.h
new file mode 100644
index 000000000000..16cf9c7237ad
--- /dev/null
+++ b/arch/nds32/include/asm/module.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASM_NDS32_MODULE_H
+#define _ASM_NDS32_MODULE_H
+
+#include <asm-generic/module.h>
+
+#define MODULE_ARCH_VERMAGIC "NDS32v3"
+
+#endif /* _ASM_NDS32_MODULE_H */
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h
new file mode 100644
index 000000000000..19b19394a936
--- /dev/null
+++ b/arch/nds32/include/asm/nds32.h
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASM_NDS32_NDS32_H_
+#define _ASM_NDS32_NDS32_H_
+
+#include <asm/bitfield.h>
+#include <asm/cachectl.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <asm/barrier.h>
+#include <nds32_intrinsic.h>
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define FP_OFFSET (-3)
+#else
+#define FP_OFFSET (-2)
+#endif
+
+extern void __init early_trap_init(void);
+static inline void GIE_ENABLE(void)
+{
+ mb();
+ __nds32__gie_en();
+}
+
+static inline void GIE_DISABLE(void)
+{
+ mb();
+ __nds32__gie_dis();
+}
+
+static inline unsigned long CACHE_SET(unsigned char cache)
+{
+ if (cache == ICACHE)
+ return 64 << ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISET) >>
+ ICM_CFG_offISET);
+ else
+ return 64 << ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSET) >>
+ DCM_CFG_offDSET);
+}
+
+static inline unsigned long CACHE_WAY(unsigned char cache)
+{
+ if (cache == ICACHE)
+ return 1 +
+ ((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskIWAY) >> ICM_CFG_offIWAY);
+ else
+ return 1 +
+ ((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDWAY) >> DCM_CFG_offDWAY);
+}
+
+static inline unsigned long CACHE_LINE_SIZE(unsigned char cache)
+{
+ if (cache == ICACHE)
+ return 8 <<
+ (((__nds32__mfsr(NDS32_SR_ICM_CFG) & ICM_CFG_mskISZ) >> ICM_CFG_offISZ) - 1);
+ else
+ return 8 <<
+ (((__nds32__mfsr(NDS32_SR_DCM_CFG) & DCM_CFG_mskDSZ) >> DCM_CFG_offDSZ) - 1);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define IVB_BASE PHYS_OFFSET /* Interrupt/exception/trap/break vector table base in user space,
+ * 64KB aligned; we define it at the start of physical memory */
+
+/* dispatched sub-entry exception handler numbering */
+#define RD_PROT 0 /* read protection */
+#define WRT_PROT 1 /* write protection */
+#define NOEXEC 2 /* non executable */
+#define PAGE_MODIFY 3 /* page modified */
+#define ACC_BIT 4 /* access bit */
+#define RESVED_PTE 5 /* reserved PTE attribute */
+/* reserved 6 ~ 16 */
+
+#endif /* _ASM_NDS32_NDS32_H_ */
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
new file mode 100644
index 000000000000..e27365c097b6
--- /dev/null
+++ b/arch/nds32/include/asm/page.h
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2005-2017 Andes Technology Corporation
+ */
+
+#ifndef _ASMNDS32_PAGE_H
+#define _ASMNDS32_PAGE_H
+
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#endif
+#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
+#define PAGE_SHIFT 13
+#endif
+#include <linux/const.h>
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+struct page;
+struct vm_area_struct;
+#ifdef CONFIG_CPU_CACHE_ALIASING
+extern void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+extern void clear_user_highpage(struct page *page, unsigned long vaddr);
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#define clear_user_highpage clear_user_highpage
+#else
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+typedef struct page *pgtable_t;
+
+#include <asm/memory.h>
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
new file mode 100644
index 000000000000..27448869131a
--- /dev/null
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMNDS32_PGALLOC_H
+#define _ASMNDS32_PGALLOC_H
+
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
+
+/*
+ * Since we have only two-level page tables, these are trivial
+ */
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, pmd) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
+
+#define check_pgt_cache() do { } while (0)
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long addr)
+{
+ pte_t *pte;
+
+	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+				       __GFP_ZERO);
+
+ return pte;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pgtable_t pte;
+
+ pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO, 0);
+ if (pte)
+ cpu_dcache_wb_page((unsigned long)page_address(pte));
+
+ return pte;
+}
+
+/*
+ * Free one PTE table.
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t * pte)
+{
+	if (pte)
+		free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ __free_page(pte);
+}
+
+/*
+ * Populate the pmdp entry with a pointer to the pte. This pmd is part
+ * of the mm address space.
+ *
+ * Ensure that we always set both PMD entries.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmdp, pte_t * ptep)
+{
+ unsigned long pte_ptr = (unsigned long)ptep;
+ unsigned long pmdval;
+
+ BUG_ON(mm != &init_mm);
+
+ /*
+ * The pmd must be loaded with the physical
+ * address of the PTE table
+ */
+ pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
+ set_pmd(pmdp, __pmd(pmdval));
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t * pmdp, pgtable_t ptep)
+{
+ unsigned long pmdval;
+
+ BUG_ON(mm == &init_mm);
+
+ pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
+ set_pmd(pmdp, __pmd(pmdval));
+}
+
+#endif
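
For reference, pmd_populate_kernel() above wires a kernel PTE table into a
pmd by storing the table's physical address; a hedged sketch of the whole
sequence (addr and pmdp are assumed to come from the caller):

	pte_t *pte = pte_alloc_one_kernel(&init_mm, addr);	/* zeroed page */

	if (pte)	/* __pa(pte) | _PAGE_KERNEL_TABLE becomes the pmd */
		pmd_populate_kernel(&init_mm, pmdp, pte);
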
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
new file mode 100644
index 000000000000..6783937edbeb
--- /dev/null
+++ b/arch/nds32/include/asm/pgtable.h
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMNDS32_PGTABLE_H
+#define _ASMNDS32_PGTABLE_H
+
+#define __PAGETABLE_PMD_FOLDED
+#include <asm-generic/4level-fixup.h>
+#include <asm-generic/sizes.h>
+
+#include <asm/memory.h>
+#include <asm/nds32.h>
+#ifndef __ASSEMBLY__
+#include <asm/fixmap.h>
+#include <asm/io.h>
+#include <nds32_intrinsic.h>
+#endif
+
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+#define PMD_SHIFT 22
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE 1024
+#endif
+
+#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
+#define PGDIR_SHIFT 24
+#define PTRS_PER_PGD 256
+#define PMD_SHIFT 24
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE 2048
+#endif
+
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, unsigned long val);
+extern void __pmd_error(const char *file, int line, unsigned long val);
+extern void __pgd_error(const char *file, int line, unsigned long val);
+
+#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
+#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#endif /* !__ASSEMBLY__ */
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define FIRST_USER_ADDRESS 0x8000
+
+#ifdef CONFIG_HIGHMEM
+#define CONSISTENT_BASE ((PKMAP_BASE) - (SZ_2M))
+#define CONSISTENT_END (PKMAP_BASE)
+#else
+#define CONSISTENT_BASE (FIXADDR_START - SZ_2M)
+#define CONSISTENT_END (FIXADDR_START)
+#endif
+#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+
+#ifdef CONFIG_HIGHMEM
+#ifndef __ASSEMBLY__
+#include <asm/highmem.h>
+#endif
+#endif
+
+#define VMALLOC_RESERVE SZ_128M
+#define VMALLOC_END (CONSISTENT_BASE - PAGE_SIZE)
+#define VMALLOC_START ((VMALLOC_END) - VMALLOC_RESERVE)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define MAXMEM __pa(VMALLOC_START)
+#define MAXMEM_PFN PFN_DOWN(MAXMEM)
+
+#define FIRST_USER_PGD_NR 0
+#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) + FIRST_USER_PGD_NR)
+
+/* L2 PTE */
+#define _PAGE_V (1UL << 0)
+
+#define _PAGE_M_XKRW (0UL << 1)
+#define _PAGE_M_UR_KR (1UL << 1)
+#define _PAGE_M_UR_KRW (2UL << 1)
+#define _PAGE_M_URW_KRW (3UL << 1)
+#define _PAGE_M_KR (5UL << 1)
+#define _PAGE_M_KRW (7UL << 1)
+
+#define _PAGE_D (1UL << 4)
+#define _PAGE_E (1UL << 5)
+#define _PAGE_A (1UL << 6)
+#define _PAGE_G (1UL << 7)
+
+#define _PAGE_C_DEV (0UL << 8)
+#define _PAGE_C_DEV_WB (1UL << 8)
+#define _PAGE_C_MEM (2UL << 8)
+#define _PAGE_C_MEM_SHRD_WB (4UL << 8)
+#define _PAGE_C_MEM_SHRD_WT (5UL << 8)
+#define _PAGE_C_MEM_WB (6UL << 8)
+#define _PAGE_C_MEM_WT (7UL << 8)
+
+#define _PAGE_L (1UL << 11)
+
+#define _HAVE_PAGE_L (_PAGE_L)
+#define _PAGE_FILE (1UL << 1)
+#define _PAGE_YOUNG 0
+#define _PAGE_M_MASK _PAGE_M_KRW
+#define _PAGE_C_MASK _PAGE_C_MEM_WT
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WT
+#else
+#define _PAGE_CACHE_SHRD _PAGE_C_MEM_SHRD_WB
+#endif
+#else
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WT
+#else
+#define _PAGE_CACHE_SHRD _PAGE_C_MEM_WB
+#endif
+#endif
+
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define _PAGE_CACHE _PAGE_C_MEM_WT
+#else
+#define _PAGE_CACHE _PAGE_C_MEM_WB
+#endif
+
+/*
+ * + Level 1 descriptor (PMD)
+ */
+#define PMD_TYPE_TABLE 0
+
+#ifndef __ASSEMBLY__
+
+#define _PAGE_USER_TABLE PMD_TYPE_TABLE
+#define _PAGE_KERNEL_TABLE PMD_TYPE_TABLE
+
+#define PAGE_EXEC __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_E)
+#define PAGE_NONE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_A)
+#define PAGE_READ __pgprot(_PAGE_V | _PAGE_M_UR_KR)
+#define PAGE_RDWR __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D)
+#define PAGE_COPY __pgprot(_PAGE_V | _PAGE_M_UR_KR)
+
+#define PAGE_UXKRWX_V1 __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_UXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_XKRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_URXKRWX_V2 __pgprot(_PAGE_V | _PAGE_M_UR_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
+#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
+#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
+#endif /* __ASSEMBLY__ */
+
+/* xwr */
+#define __P000 (PAGE_NONE | _PAGE_CACHE_SHRD)
+#define __P001 (PAGE_READ | _PAGE_CACHE_SHRD)
+#define __P010 (PAGE_COPY | _PAGE_CACHE_SHRD)
+#define __P011 (PAGE_COPY | _PAGE_CACHE_SHRD)
+#define __P100 (PAGE_EXEC | _PAGE_CACHE_SHRD)
+#define __P101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
+#define __P110 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)
+#define __P111 (PAGE_COPY | _PAGE_E | _PAGE_CACHE_SHRD)
+
+#define __S000 (PAGE_NONE | _PAGE_CACHE_SHRD)
+#define __S001 (PAGE_READ | _PAGE_CACHE_SHRD)
+#define __S010 (PAGE_RDWR | _PAGE_CACHE_SHRD)
+#define __S011 (PAGE_RDWR | _PAGE_CACHE_SHRD)
+#define __S100 (PAGE_EXEC | _PAGE_CACHE_SHRD)
+#define __S101 (PAGE_READ | _PAGE_E | _PAGE_CACHE_SHRD)
+#define __S110 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
+#define __S111 (PAGE_RDWR | _PAGE_E | _PAGE_CACHE_SHRD)
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc.
+ */
+extern struct page *empty_zero_page;
+extern void paging_init(void);
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+#define pte_none(pte) !(pte_val(pte))
+#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
+#define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define pmd_off_k(address) pmd_offset(pgd_offset_k(address), address)
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+/*
+ * Set a level 1 translation table entry, and clean it out of
+ * any caches such that the MMUs can load it correctly.
+ */
+static inline void set_pmd(pmd_t * pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (pmdp):"memory");
+ __nds32__msync_all();
+ __nds32__dsb();
+#endif
+}
+
+/*
+ * Set a PTE and flush it out
+ */
+static inline void set_pte(pte_t * ptep, pte_t pte)
+{
+	*ptep = pte;
+#if !defined(CONFIG_CPU_DCACHE_DISABLE) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (ptep):"memory");
+ __nds32__msync_all();
+ __nds32__dsb();
+#endif
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+/*
+ * pte_write: this page is writable for user mode
+ * pte_read: this page is readable for user mode
+ * pte_kernel_write: this page is writable for kernel mode
+ *
+ * There is no pte_kernel_read because the kernel can always read.
+ */
+
+#define pte_present(pte) (pte_val(pte) & _PAGE_V)
+#define pte_write(pte) ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW)
+#define pte_read(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KR) || \
+ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
+ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW))
+#define pte_kernel_write(pte) (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_URW_KRW) || \
+ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_UR_KRW) || \
+ ((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_KRW) || \
+ (((pte_val(pte) & _PAGE_M_MASK) == _PAGE_M_XKRW) && pte_exec(pte)))
+#define pte_exec(pte) (pte_val(pte) & _PAGE_E)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_D)
+#define pte_young(pte) (pte_val(pte) & _PAGE_YOUNG)
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+#define pte_file(pte) (pte_val(pte) & _PAGE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pgoff_to_pte(x) __pte(((x) << 2) | _PAGE_FILE)
+
+#define PTE_FILE_MAX_BITS 29
+
+#define PTE_BIT_FUNC(fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
+ pte_val(pte) = pte_val(pte) | _PAGE_M_UR_KR;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) = pte_val(pte) & ~_PAGE_M_MASK;
+ pte_val(pte) = pte_val(pte) | _PAGE_M_URW_KRW;
+ return pte;
+}
+
+PTE_BIT_FUNC(exprotect, &=~_PAGE_E);
+PTE_BIT_FUNC(mkexec, |=_PAGE_E);
+PTE_BIT_FUNC(mkclean, &=~_PAGE_D);
+PTE_BIT_FUNC(mkdirty, |=_PAGE_D);
+PTE_BIT_FUNC(mkold, &=~_PAGE_YOUNG);
+PTE_BIT_FUNC(mkyoung, |=_PAGE_YOUNG);
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV)
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot)&~_PAGE_C_MASK) | _PAGE_C_DEV_WB)
+
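+/*
+ * Note: an empty entry is encoded as the value 1 (bit 0 set), which is
+ * what pmd_clear() below installs; hence the inverted-looking test.
+ */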
+#define pmd_none(pmd) (pmd_val(pmd)&0x1)
+#define pmd_present(pmd) (!pmd_none(pmd))
+#define pmd_bad(pmd) pmd_none(pmd)
+
+#define copy_pmd(pmdpd,pmdps) set_pmd((pmdpd), *(pmdps))
+#define pmd_clear(pmdp) set_pmd((pmdp), __pmd(1))
+
+static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
+{
+ unsigned long ptr = (unsigned long)ptep;
+ pmd_t pmd;
+
+ /*
+ * The pmd must be loaded with the physical
+ * address of the PTE table
+ */
+
+ pmd_val(pmd) = __virt_to_phys(ptr) | prot;
+ return pmd;
+}
+
+#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
+
+/*
+ * Convert a number of pages to megabytes.
+ */
+#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_present(pgd) (1)
+#define pgd_clear(pgdp) do { } while (0)
+
+#define page_pte_prot(page,prot) mk_pte(page, prot)
+#define page_pte(page) mk_pte(page, __pgprot(0))
+/*
+ * L1PTE = $mr1 + ((virt >> PMD_SHIFT) << 2);
+ * L2PTE = (((virt >> PAGE_SHIFT) & (PTRS_PER_PTE -1 )) << 2);
+ * PPN = (phys & 0xfffff000);
+ */
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr) ((pmd_t *)(dir))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const unsigned long mask = 0xfff;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* Encode and decode a swap entry.
+ *
+ * We support up to 32GB of swap on 4k machines
+ */
+#define __swp_type(x) (((x).val >> 2) & 0x7f)
+#define __swp_offset(x) ((x).val >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * remap a physical address `phys' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASMNDS32_PGTABLE_H */
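
The swap encoding above leaves bit 0 (_PAGE_V) clear so a swapped-out PTE is
never "present"; the type takes bits 2..8 and the offset bits 9..31. That
gives 2^23 page-sized slots, i.e. 2^23 * 4KB = 32GB with 4KB pages, matching
the comment. A small illustrative round-trip (values hypothetical):

	swp_entry_t e = __swp_entry(3, 0x1000);

	BUG_ON(__swp_type(e) != 3);		/* (e.val >> 2) & 0x7f */
	BUG_ON(__swp_offset(e) != 0x1000);	/* e.val >> 9 */
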
diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h
new file mode 100644
index 000000000000..bedc4f59e064
--- /dev/null
+++ b/arch/nds32/include/asm/proc-fns.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_PROCFNS_H__
+#define __NDS32_PROCFNS_H__
+
+#ifdef __KERNEL__
+#include <asm/page.h>
+
+struct mm_struct;
+struct vm_area_struct;
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern void cpu_do_idle(void);
+extern void cpu_reset(unsigned long reset);
+extern void cpu_switch_mm(struct mm_struct *mm);
+
+extern void cpu_dcache_inval_all(void);
+extern void cpu_dcache_wbinval_all(void);
+extern void cpu_dcache_inval_page(unsigned long page);
+extern void cpu_dcache_wb_page(unsigned long page);
+extern void cpu_dcache_wbinval_page(unsigned long page);
+extern void cpu_dcache_inval_range(unsigned long start, unsigned long end);
+extern void cpu_dcache_wb_range(unsigned long start, unsigned long end);
+extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end);
+
+extern void cpu_icache_inval_all(void);
+extern void cpu_icache_inval_page(unsigned long page);
+extern void cpu_icache_inval_range(unsigned long start, unsigned long end);
+
+extern void cpu_cache_wbinval_page(unsigned long page, int flushi);
+extern void cpu_cache_wbinval_range(unsigned long start,
+ unsigned long end, int flushi);
+extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, bool flushi,
+ bool wbd);
+
+extern void cpu_dma_wb_range(unsigned long start, unsigned long end);
+extern void cpu_dma_inval_range(unsigned long start, unsigned long end);
+extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end);
+
+#endif /* __KERNEL__ */
+#endif /* __NDS32_PROCFNS_H__ */
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h
new file mode 100644
index 000000000000..9c83caf4269f
--- /dev/null
+++ b/arch/nds32/include/asm/processor.h
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_PROCESSOR_H
+#define __ASM_NDS32_PROCESSOR_H
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+
+#define KERNEL_STACK_SIZE PAGE_SIZE
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+
+struct cpu_context {
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long fp;
+ unsigned long pc;
+ unsigned long sp;
+};
+
+struct thread_struct {
+ struct cpu_context cpu_context; /* cpu context */
+ /* fault info */
+ unsigned long address;
+ unsigned long trap_no;
+ unsigned long error_code;
+};
+
+#define INIT_THREAD { }
+
+#ifdef __NDS32_EB__
+#define PSW_DE PSW_mskBE
+#else
+#define PSW_DE 0x0
+#endif
+
+#ifdef CONFIG_WBNA
+#define PSW_valWBNA PSW_mskWBNA
+#else
+#define PSW_valWBNA 0x0
+#endif
+
+#ifdef CONFIG_HWZOL
+#define PSW_valINIT (PSW_CPL_ANY | PSW_mskAEN | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE)
+#else
+#define PSW_valINIT (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_mskGIE)
+#endif
+
+#define start_thread(regs,pc,stack) \
+({ \
+ memzero(regs, sizeof(struct pt_regs)); \
+ forget_syscall(regs); \
+ regs->ipsw = PSW_valINIT; \
+ regs->ir0 = (PSW_CPL_ANY | PSW_valWBNA | PSW_mskDT | PSW_mskIT | PSW_DE | PSW_SYSTEM | PSW_INTL_1); \
+ regs->ipc = pc; \
+ regs->sp = stack; \
+})
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax() barrier()
+
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+ - 8) - 1)
+
+/*
+ * Create a new kernel thread
+ */
+extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags);
+
+#define KSTK_EIP(tsk) instruction_pointer(task_pt_regs(tsk))
+#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
+
+#endif
+
+#endif /* __ASM_NDS32_PROCESSOR_H */
diff --git a/arch/nds32/include/asm/ptrace.h b/arch/nds32/include/asm/ptrace.h
new file mode 100644
index 000000000000..c4538839055c
--- /dev/null
+++ b/arch/nds32/include/asm/ptrace.h
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_PTRACE_H
+#define __ASM_NDS32_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+/*
+ * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
+ * a syscall -- i.e., its most recent entry into the kernel from
+ * userspace was not via syscall, or otherwise a tracer cancelled the
+ * syscall.
+ *
+ * This must have the value -1, for ABI compatibility with ptrace etc.
+ */
+#define NO_SYSCALL (-1)
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+struct pt_regs {
+ union {
+ struct user_pt_regs user_regs;
+ struct {
+ long uregs[26];
+ long fp;
+ long gp;
+ long lp;
+ long sp;
+ long ipc;
+#if defined(CONFIG_HWZOL)
+ long lb;
+ long le;
+ long lc;
+#else
+ long dummy[3];
+#endif
+ long syscallno;
+ };
+ };
+ long orig_r0;
+ long ir0;
+ long ipsw;
+ long pipsw;
+ long pipc;
+ long pp0;
+ long pp1;
+ long fucop_ctl;
+ long osp;
+};
+
+static inline bool in_syscall(struct pt_regs const *regs)
+{
+ return regs->syscallno != NO_SYSCALL;
+}
+
+static inline void forget_syscall(struct pt_regs *regs)
+{
+ regs->syscallno = NO_SYSCALL;
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->uregs[0];
+}
+
+extern void show_regs(struct pt_regs *);
+/* Avoid circular header include via sched.h */
+struct task_struct;
+
+#define arch_has_single_step() (1)
+#define user_mode(regs) (((regs)->ipsw & PSW_mskPOM) == 0)
+#define interrupts_enabled(regs) (!!((regs)->ipsw & PSW_mskGIE))
+#define user_stack_pointer(regs) ((regs)->sp)
+#define instruction_pointer(regs) ((regs)->ipc)
+#define profile_pc(regs) instruction_pointer(regs)
+
+#endif /* __ASSEMBLY__ */
+#endif
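
A hedged sketch of how these accessors compose in a trap or signal path
(example_report() is an illustrative name, not code from the patch):

	static void example_report(struct pt_regs *regs)
	{
		if (user_mode(regs) && in_syscall(regs))
			pr_info("pc %08lx in syscall %ld\n",
				instruction_pointer(regs), regs->syscallno);
	}
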
diff --git a/arch/nds32/include/asm/shmparam.h b/arch/nds32/include/asm/shmparam.h
new file mode 100644
index 000000000000..fd1cff64b68e
--- /dev/null
+++ b/arch/nds32/include/asm/shmparam.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMNDS32_SHMPARAM_H
+#define _ASMNDS32_SHMPARAM_H
+
+/*
+ * This should be the largest (cache size / number of ways) among the
+ * virtually indexed caches, since such a cache aliases every
+ * size/ways bytes.
+ */
+#define SHMLBA (4 * SZ_8K) /* attach addr a multiple of this */
+
+/*
+ * Enforce SHMLBA in shmat
+ */
+#define __ARCH_FORCE_SHMLBA
+
+#endif /* _ASMNDS32_SHMPARAM_H */
diff --git a/arch/nds32/include/asm/string.h b/arch/nds32/include/asm/string.h
new file mode 100644
index 000000000000..179272caa540
--- /dev/null
+++ b/arch/nds32/include/asm/string.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_STRING_H
+#define __ASM_NDS32_STRING_H
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+extern void *memzero(void *ptr, __kernel_size_t n);
+#endif
diff --git a/arch/nds32/include/asm/swab.h b/arch/nds32/include/asm/swab.h
new file mode 100644
index 000000000000..e01a755a37d2
--- /dev/null
+++ b/arch/nds32/include/asm/swab.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_SWAB_H__
+#define __NDS32_SWAB_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ __asm__("wsbh %0, %0\n\t" /* word swap byte within halfword */
+ "rotri %0, %0, #16\n"
+ :"=r"(x)
+ :"0"(x));
+ return x;
+}
+
+static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+{
+ __asm__("wsbh %0, %0\n" /* word swap byte within halfword */
+ :"=r"(x)
+ :"0"(x));
+ return x;
+}
+
+#define __arch_swab32(x) ___arch__swab32(x)
+#define __arch_swab16(x) ___arch__swab16(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __BYTEORDER_HAS_U64__
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif /* __NDS32_SWAB_H__ */
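
The wsbh/rotri pair maps onto plain C as "swap bytes within each halfword,
then rotate by 16 bits"; a portable sketch of what ___arch__swab32() computes
(example_swab32 is an illustrative name):

	static inline __u32 example_swab32(__u32 x)
	{
		/* wsbh: swap bytes within each 16-bit halfword */
		x = ((x & 0x00ff00ffU) << 8) | ((x >> 8) & 0x00ff00ffU);
		/* rotri 16: exchange the two halfwords */
		return (x << 16) | (x >> 16);
	}

	/* example_swab32(0x12345678) == 0x78563412 */
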
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
new file mode 100644
index 000000000000..f7e5e86765fe
--- /dev/null
+++ b/arch/nds32/include/asm/syscall.h
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASM_NDS32_SYSCALL_H
+#define _ASM_NDS32_SYSCALL_H 1
+
+#include <linux/err.h>
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * Note this returns int even on 64-bit machines. Only 32 bits of
+ * system call number can be meaningful. If the actual arch value
+ * is 64 bits, this truncates to 32 bits so 0xffffffff means -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->syscallno;
+}
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op. The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->uregs[0] = regs->orig_r0;
+}
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long error = regs->uregs[0];
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->uregs[0];
+}
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val. If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			 int error, long val)
+{
+ regs->uregs[0] = (long)error ? error : val;
+}
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5). Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+#define SYSCALL_MAX_ARGS 6
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, unsigned long *args)
+{
+ if (n == 0)
+ return;
+ if (i + n > SYSCALL_MAX_ARGS) {
+ unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+ unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+		pr_warn("%s called with max args %d, handling only %d\n",
+			__func__, i + n, SYSCALL_MAX_ARGS);
+		memset(args_bad, 0, n_bad * sizeof(args[0]));
+ }
+
+ if (i == 0) {
+ args[0] = regs->orig_r0;
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
+}
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n,
+		      const unsigned long *args)
+{
+ if (n == 0)
+ return;
+
+ if (i + n > SYSCALL_MAX_ARGS) {
+ pr_warn("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+ n = SYSCALL_MAX_ARGS - i;
+ }
+
+ if (i == 0) {
+ regs->orig_r0 = args[0];
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
+}
+#endif /* _ASM_NDS32_SYSCALL_H */
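
A hedged sketch of the tracer-side calling convention for these helpers, with
the tracee stopped at syscall entry (the locals are illustrative):

	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	if (syscall_get_nr(task, regs) == __NR_write)
		/* fd, buf and count for write(2) land in args[0..2] */
		syscall_get_arguments(task, regs, 0, 3, args);
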
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h
new file mode 100644
index 000000000000..78778ecff60c
--- /dev/null
+++ b/arch/nds32/include/asm/syscalls.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_SYSCALLS_H
+#define __ASM_NDS32_SYSCALLS_H
+
+asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op);
+asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len);
+asmlinkage long sys_rt_sigreturn_wrapper(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_NDS32_SYSCALLS_H */
diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h
new file mode 100644
index 000000000000..bff741ff337b
--- /dev/null
+++ b/arch/nds32/include/asm/thread_info.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_THREAD_INFO_H
+#define __ASM_NDS32_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#define THREAD_SIZE_ORDER (1)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+typedef unsigned long mm_segment_t;
+
+/*
+ * low level task data that entry.S needs immediate access to.
+ * __switch_to() assumes cpu_context follows immediately after cpu_domain.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ __s32 preempt_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+};
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+#define thread_saved_pc(tsk) ((unsigned long)(tsk->thread.cpu_context.pc))
+#define thread_saved_fp(tsk) ((unsigned long)(tsk->thread.cpu_context.fp))
+#endif
+
+/*
+ * thread information flags:
+ * TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_SIGPENDING - signal pending
+ * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
+ * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
+ * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
+ */
+#define TIF_SIGPENDING 1
+#define TIF_NEED_RESCHED 2
+#define TIF_SINGLESTEP 3
+#define TIF_NOTIFY_RESUME 4 /* callback before returning to user */
+#define TIF_SYSCALL_TRACE 8
+#define TIF_USEDFPU 16
+#define TIF_POLLING_NRFLAG 17
+#define TIF_MEMDIE 18
+#define TIF_FREEZE 19
+#define TIF_RESTORE_SIGMASK 20
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
+/*
+ * Change these and you break ASM code in entry-common.S
+ */
+#define _TIF_WORK_MASK 0x000000ff
+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)
+#define _TIF_WORK_SYSCALL_LEAVE (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_NDS32_THREAD_INFO_H */
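
A minimal sketch of how the return path is expected to consume these masks
(example_work_pending() is illustrative; the real test lives in the entry
assembly mentioned in the comment above):

	static inline bool example_work_pending(void)
	{
		/* any of the low eight flags forces the slow return path */
		return current_thread_info()->flags & _TIF_WORK_MASK;
	}
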
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
new file mode 100644
index 000000000000..b35ae5eae3ab
--- /dev/null
+++ b/arch/nds32/include/asm/tlb.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASMNDS32_TLB_H
+#define __ASMNDS32_TLB_H
+
+#define tlb_start_vma(tlb,vma) \
+ do { \
+ if (!tlb->fullmm) \
+ flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define tlb_end_vma(tlb,vma) \
+ do { \
+		if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+ } while (0)
+
+#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
new file mode 100644
index 000000000000..9b411f401903
--- /dev/null
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMNDS32_TLBFLUSH_H
+#define _ASMNDS32_TLBFLUSH_H
+
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <nds32_intrinsic.h>
+
+static inline void local_flush_tlb_all(void)
+{
+ __nds32__tlbop_flua();
+ __nds32__isb();
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ __nds32__tlbop_flua();
+ __nds32__isb();
+}
+
+static inline void local_flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ while (start < end) {
+ __nds32__tlbop_inv(start);
+ __nds32__isb();
+ start += PAGE_SIZE;
+ }
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_range local_flush_tlb_range
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+
+void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t * pte);
+void tlb_migrate_finish(struct mm_struct *mm);
+
+#endif
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
new file mode 100644
index 000000000000..18a009f3804d
--- /dev/null
+++ b/arch/nds32/include/asm/uaccess.h
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMANDES_UACCESS_H
+#define _ASMANDES_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <asm/memory.h>
+#include <asm/types.h>
+#include <linux/mm.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#define KERNEL_DS ((mm_segment_t) { ~0UL })
+#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 })
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define user_addr_max get_fs
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+}
+
+#define segment_eq(a, b) ((a) == (b))
+
+#define __range_ok(addr, size) ((size) <= get_fs() && (addr) <= (get_fs() - (size)))
+
+#define access_ok(type, addr, size) \
+ __range_ok((unsigned long)addr, (unsigned long)size)
+/*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+ * which read from user space (*get_*) need to take care not to leak
+ * kernel data even if the calling code is buggy and fails to check
+ * the return value. This means zeroing out the destination variable
+ * or buffer on error. Normally this is done out of line by the
+ * fixup code, but there are a few places where it intrudes on the
+ * main code path. When we only write to user space, there is no
+ * problem.
+ *
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success. Note that these
+ * versions are void (ie, don't return a value as such).
+ */
+
+#define get_user(x,p) \
+({ \
+ long __e = -EFAULT; \
+	if (likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \
+ __e = __get_user(x,p); \
+ } else \
+ x = 0; \
+ __e; \
+})
+#define __get_user(x,ptr) \
+({ \
+ long __gu_err = 0; \
+ __get_user_err((x),(ptr),__gu_err); \
+ __gu_err; \
+})
+
+#define __get_user_error(x,ptr,err) \
+({ \
+ __get_user_err((x),(ptr),err); \
+ (void) 0; \
+})
+
+#define __get_user_err(x,ptr,err) \
+do { \
+ unsigned long __gu_addr = (unsigned long)(ptr); \
+ unsigned long __gu_val; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_asm("lbi",__gu_val,__gu_addr,err); \
+ break; \
+ case 2: \
+ __get_user_asm("lhi",__gu_val,__gu_addr,err); \
+ break; \
+ case 4: \
+ __get_user_asm("lwi",__gu_val,__gu_addr,err); \
+ break; \
+ case 8: \
+ __get_user_asm_dword(__gu_val,__gu_addr,err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ break; \
+ } \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+} while (0)
+
+#define __get_user_asm(inst,x,addr,err) \
+ asm volatile( \
+ "1: "inst" %1,[%2]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: move %0, %3\n" \
+ " move %1, #0\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (x) \
+ : "r" (addr), "i" (-EFAULT) \
+ : "cc")
+
+#ifdef __NDS32_EB__
+#define __gu_reg_oper0 "%H1"
+#define __gu_reg_oper1 "%L1"
+#else
+#define __gu_reg_oper0 "%L1"
+#define __gu_reg_oper1 "%H1"
+#endif
+
+#define __get_user_asm_dword(x, addr, err) \
+ asm volatile( \
+ "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \
+ "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: move %0, %3\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous" \
+ : "+r"(err), "=&r"(x) \
+ : "r"(addr), "i"(-EFAULT) \
+ : "cc")
+#define put_user(x,p) \
+({ \
+ long __e = -EFAULT; \
+	if (likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \
+ __e = __put_user(x,p); \
+ } \
+ __e; \
+})
+#define __put_user(x,ptr) \
+({ \
+ long __pu_err = 0; \
+ __put_user_err((x),(ptr),__pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_error(x,ptr,err) \
+({ \
+ __put_user_err((x),(ptr),err); \
+ (void) 0; \
+})
+
+#define __put_user_err(x,ptr,err) \
+do { \
+ unsigned long __pu_addr = (unsigned long)(ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_asm("sbi",__pu_val,__pu_addr,err); \
+ break; \
+ case 2: \
+ __put_user_asm("shi",__pu_val,__pu_addr,err); \
+ break; \
+ case 4: \
+ __put_user_asm("swi",__pu_val,__pu_addr,err); \
+ break; \
+ case 8: \
+ __put_user_asm_dword(__pu_val,__pu_addr,err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ break; \
+ } \
+} while (0)
+
+#define __put_user_asm(inst,x,addr,err) \
+ asm volatile( \
+ "1: "inst" %1,[%2]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: move %0, %3\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err) \
+ : "r" (x), "r" (addr), "i" (-EFAULT) \
+ : "cc")
+
+#ifdef __NDS32_EB__
+#define __pu_reg_oper0 "%H2"
+#define __pu_reg_oper1 "%L2"
+#else
+#define __pu_reg_oper0 "%L2"
+#define __pu_reg_oper1 "%H2"
+#endif
+
+#define __put_user_asm_dword(x, addr, err) \
+ asm volatile( \
+ "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \
+ "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: move %0, %3\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous" \
+ : "+r"(err) \
+ : "r"(addr), "r"(x), "i"(-EFAULT) \
+ : "cc")
+extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
+extern long strncpy_from_user(char *dest, const char __user * src, long count);
+extern __must_check long strlen_user(const char __user * str);
+extern __must_check long strnlen_user(const char __user * str, long n);
+extern unsigned long __arch_copy_from_user(void *to, const void __user * from,
+ unsigned long n);
+extern unsigned long __arch_copy_to_user(void __user * to, const void *from,
+ unsigned long n);
+
+#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_to_user __arch_copy_to_user
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+static inline unsigned long clear_user(void __user * to, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __arch_clear_user(to, n);
+ return n;
+}
+
+static inline unsigned long __clear_user(void __user * to, unsigned long n)
+{
+ return __arch_clear_user(to, n);
+}
+
+#endif /* _ASMNDS32_UACCESS_H */
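
A small sketch of the intended calling convention: get_user()/put_user()
perform the access_ok() check themselves, while the __-prefixed variants
assume the caller already did (example_bump() is a hypothetical helper):

	static long example_bump(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* -EFAULT on bad access */
			return -EFAULT;
		return put_user(val + 1, uptr);	/* 0 on success */
	}
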
diff --git a/arch/nds32/include/asm/unistd.h b/arch/nds32/include/asm/unistd.h
new file mode 100644
index 000000000000..b586a2862beb
--- /dev/null
+++ b/arch/nds32/include/asm/unistd.h
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#define __ARCH_WANT_SYS_CLONE
+
+#include <uapi/asm/unistd.h>
diff --git a/arch/nds32/include/asm/vdso.h b/arch/nds32/include/asm/vdso.h
new file mode 100644
index 000000000000..af2c6afc2469
--- /dev/null
+++ b/arch/nds32/include/asm/vdso.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2005-2017 Andes Technology Corporation
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <generated/vdso-offsets.h>
+
+#define VDSO_SYMBOL(base, name) \
+({ \
+ (unsigned long)(vdso_offset_##name + (unsigned long)(base)); \
+})
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h
new file mode 100644
index 000000000000..79db5a12ca5e
--- /dev/null
+++ b/arch/nds32/include/asm/vdso_datapage.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+#ifndef __ASM_VDSO_DATAPAGE_H
+#define __ASM_VDSO_DATAPAGE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+struct vdso_data {
+	bool cycle_count_down;	/* timer cycle counter decreases over time */
+ u32 cycle_count_offset; /* offset of timer cycle counter register */
+ u32 seq_count; /* sequence count - odd during updates */
+ u32 xtime_coarse_sec; /* coarse time */
+ u32 xtime_coarse_nsec;
+
+ u32 wtm_clock_sec; /* wall to monotonic offset */
+ u32 wtm_clock_nsec;
+ u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
+ u32 cs_mult; /* clocksource multiplier */
+ u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */
+
+ u64 cs_cycle_last; /* last cycle value */
+ u64 cs_mask; /* clocksource mask */
+
+ u64 xtime_clock_nsec; /* CLOCK_REALTIME sub-ns base */
+ u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
+ u32 tz_dsttime;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_VDSO_DATAPAGE_H */
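
seq_count follows the usual vDSO seqlock convention: the kernel makes it odd
while updating the page and even again afterwards, so a reader retries until
it sees the same even value before and after its reads. A hedged reader-side
sketch (vdata is assumed to point at the mapped vdso_data):

	u32 seq;

	do {
		while ((seq = READ_ONCE(vdata->seq_count)) & 1)
			cpu_relax();		/* update in progress */
		smp_rmb();
		/* ... sample xtime_clock_sec, cs_cycle_last, ... */
		smp_rmb();
	} while (READ_ONCE(vdata->seq_count) != seq);
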
diff --git a/arch/nds32/include/asm/vdso_timer_info.h b/arch/nds32/include/asm/vdso_timer_info.h
new file mode 100644
index 000000000000..50ba117cff12
--- /dev/null
+++ b/arch/nds32/include/asm/vdso_timer_info.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_VDSO_TIMER_INFO_H
+#define __ASM_VDSO_TIMER_INFO_H
+
+extern struct timer_info_t timer_info;
+#define EMPTY_VALUE ~(0UL)
+#define EMPTY_TIMER_MAPPING EMPTY_VALUE
+#define EMPTY_REG_OFFSET EMPTY_VALUE
+
+struct timer_info_t {
+	bool cycle_count_down;
+	unsigned long mapping_base;
+	unsigned long cycle_count_reg_offset;
+};
+
+#endif /* __ASM_VDSO_TIMER_INFO_H */
diff --git a/arch/nds32/include/uapi/asm/Kbuild b/arch/nds32/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..40be972faf9e
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/Kbuild
@@ -0,0 +1,29 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += bpf_perf_event.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += shmbuf.h
+generic-y += bitsperlong.h
+generic-y += fcntl.h
+generic-y += stat.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += swab.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000000..56043ce4972f
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/auxvec.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_AUXVEC_H
+#define __ASM_AUXVEC_H
+
+/* VDSO location */
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 1
+
+#endif
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..a23f6f3a2468
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/byteorder.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __NDS32_BYTEORDER_H__
+#define __NDS32_BYTEORDER_H__
+
+#ifdef __NDS32_EB__
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __NDS32_BYTEORDER_H__ */
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..4cdca9b23974
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
+// Copyright (C) 2005-2017 Andes Technology Corporation
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE 0 /* flush instruction cache */
+#define DCACHE 1 /* writeback and flush data cache */
+#define BCACHE 2 /* flush instruction cache + writeback and flush data cache */
+
+#endif /* _ASM_CACHECTL */
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h
new file mode 100644
index 000000000000..e3fb723ee362
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/param.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __ASM_NDS32_PARAM_H
+#define __ASM_NDS32_PARAM_H
+
+#define EXEC_PAGESIZE 8192
+
+#include <asm-generic/param.h>
+
+#endif /* __ASM_NDS32_PARAM_H */
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..358c99e399d0
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/ptrace.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef __UAPI_ASM_NDS32_PTRACE_H
+#define __UAPI_ASM_NDS32_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * User structures for general purpose register.
+ */
+struct user_pt_regs {
+ long uregs[26];
+ long fp;
+ long gp;
+ long lp;
+ long sp;
+ long ipc;
+ long lb;
+ long le;
+ long lc;
+ long syscallno;
+};
+#endif
+#endif
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..00567b237b0c
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#ifndef _ASMNDS32_SIGCONTEXT_H
+#define _ASMNDS32_SIGCONTEXT_H
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+
+struct zol_struct {
+ unsigned long nds32_lc; /* $LC */
+ unsigned long nds32_le; /* $LE */
+ unsigned long nds32_lb; /* $LB */
+};
+
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long nds32_r0;
+ unsigned long nds32_r1;
+ unsigned long nds32_r2;
+ unsigned long nds32_r3;
+ unsigned long nds32_r4;
+ unsigned long nds32_r5;
+ unsigned long nds32_r6;
+ unsigned long nds32_r7;
+ unsigned long nds32_r8;
+ unsigned long nds32_r9;
+ unsigned long nds32_r10;
+ unsigned long nds32_r11;
+ unsigned long nds32_r12;
+ unsigned long nds32_r13;
+ unsigned long nds32_r14;
+ unsigned long nds32_r15;
+ unsigned long nds32_r16;
+ unsigned long nds32_r17;
+ unsigned long nds32_r18;
+ unsigned long nds32_r19;
+ unsigned long nds32_r20;
+ unsigned long nds32_r21;
+ unsigned long nds32_r22;
+ unsigned long nds32_r23;
+ unsigned long nds32_r24;
+ unsigned long nds32_r25;
+ unsigned long nds32_fp; /* $r28 */
+ unsigned long nds32_gp; /* $r29 */
+ unsigned long nds32_lp; /* $r30 */
+ unsigned long nds32_sp; /* $r31 */
+ unsigned long nds32_ipc;
+ unsigned long fault_address;
+ unsigned long used_math_flag;
+ /* FPU Registers */
+ struct zol_struct zol;
+};
+
+#endif
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..6e95901cabe3
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#define __ARCH_WANT_SYNC_FILE_RANGE2
+
+/* Use the standard ABI for syscalls */
+#include <asm-generic/unistd.h>
+
+/* Additional NDS32 specific syscalls. */
+#define __NR_cacheflush (__NR_arch_specific_syscall)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
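
From userspace the new call goes through the arch-specific syscall slot; a
hedged example of flushing freshly written code, assuming the uapi headers
are installed (buf and len are the caller's):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/cachectl.h>

	/* after patching instructions in [buf, buf + len) */
	syscall(__NR_cacheflush, (unsigned long)buf, len, BCACHE);
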
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
new file mode 100644
index 000000000000..42792743e8b9
--- /dev/null
+++ b/arch/nds32/kernel/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for the linux kernel.
+#
+
+CPPFLAGS_vmlinux.lds := -DTEXTADDR=$(TEXTADDR)
+AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR)
+
+# Object file lists.
+
+obj-y := ex-entry.o ex-exit.o ex-scall.o irq.o \
+ process.o ptrace.o setup.o signal.o \
+ sys_nds32.o time.o traps.o cacheinfo.o \
+ dma.o syscall_table.o vdso.o
+
+obj-$(CONFIG_MODULES) += nds32_ksyms.o module.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_OF) += devtree.o
+obj-$(CONFIG_CACHE_L2) += atl2c.o
+
+extra-y := head.o vmlinux.lds
+
+obj-y += vdso/
diff --git a/arch/nds32/kernel/asm-offsets.c b/arch/nds32/kernel/asm-offsets.c
new file mode 100644
index 000000000000..3541d5981de7
--- /dev/null
+++ b/arch/nds32/kernel/asm-offsets.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kbuild.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT,
+ offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(THREAD_CPU_CONTEXT,
+ offsetof(struct task_struct, thread.cpu_context));
+ DEFINE(OSP_OFFSET, offsetof(struct pt_regs, osp));
+ DEFINE(SP_OFFSET, offsetof(struct pt_regs, sp));
+ DEFINE(FUCOP_CTL_OFFSET, offsetof(struct pt_regs, fucop_ctl));
+ DEFINE(IPSW_OFFSET, offsetof(struct pt_regs, ipsw));
+ DEFINE(SYSCALLNO_OFFSET, offsetof(struct pt_regs, syscallno));
+ DEFINE(IPC_OFFSET, offsetof(struct pt_regs, ipc));
+ DEFINE(R0_OFFSET, offsetof(struct pt_regs, uregs[0]));
+ DEFINE(R15_OFFSET, offsetof(struct pt_regs, uregs[15]));
+ DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+ return 0;
+}
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c
new file mode 100644
index 000000000000..0c6d031a1c4a
--- /dev/null
+++ b/arch/nds32/kernel/atl2c.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/compiler.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <asm/l2_cache.h>
+
+void __iomem *atl2c_base;
+static const struct of_device_id atl2c_ids[] __initconst = {
+ {.compatible = "andestech,atl2c",}
+};
+
+static int __init atl2c_of_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ unsigned long tmp = 0;
+ unsigned long l2set, l2way, l2clsz;
+
+ if (!(__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C))
+ return -ENODEV;
+
+ np = of_find_matching_node(NULL, atl2c_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ atl2c_base = ioremap(res.start, resource_size(&res));
+ if (!atl2c_base)
+ return -ENOMEM;
+
+ l2set =
+ 64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >>
+ L2_CA_CONF_offL2SET);
+ l2way =
+ 1 +
+ ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >>
+ L2_CA_CONF_offL2WAY);
+ l2clsz =
+ 4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >>
+ L2_CA_CONF_offL2CLSZ);
+ pr_info("L2:%luKB/%luS/%luW/%luB\n",
+ l2set * l2way * l2clsz / 1024, l2set, l2way, l2clsz);
+
+ tmp = L2C_R_REG(L2CC_PROT_OFF);
+ tmp &= ~L2CC_PROT_mskMRWEN;
+ L2C_W_REG(L2CC_PROT_OFF, tmp);
+
+ tmp = L2C_R_REG(L2CC_SETUP_OFF);
+ tmp &= ~L2CC_SETUP_mskPART;
+ L2C_W_REG(L2CC_SETUP_OFF, tmp);
+
+ tmp = L2C_R_REG(L2CC_CTRL_OFF);
+ tmp |= L2CC_CTRL_mskEN;
+ L2C_W_REG(L2CC_CTRL_OFF, tmp);
+
+ return 0;
+}
+
+subsys_initcall(atl2c_of_init);
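
To make the register decode above concrete, suppose a hypothetical L2_CA_CONF
reads back L2SET = 2, L2WAY = 7 and L2CLSZ = 2; the driver would then compute

	l2set  = 64 << 2 = 256 sets
	l2way  = 1 + 7   = 8 ways
	l2clsz = 4 << 2  = 16-byte lines

and report 256 * 8 * 16 / 1024 = 32, printed as "L2:32KB/256S/8W/16B".
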
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
new file mode 100644
index 000000000000..0a7bc696dd55
--- /dev/null
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ char cache_type = (type & CACHE_TYPE_INST ? ICACHE : DCACHE);
+
+ this_leaf->level = level;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
+ this_leaf->number_of_sets = CACHE_SET(cache_type);
+ this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
+ this_leaf->size = this_leaf->number_of_sets *
+ this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
+#if defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+ this_leaf->attributes = CACHE_WRITE_THROUGH;
+#else
+ this_leaf->attributes = CACHE_WRITE_BACK;
+#endif
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ /* Only one cache level, with separate I- and D-caches. */
+ this_cpu_ci->num_levels = 1;
+ this_cpu_ci->num_leaves = 2;
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ }
+ return 0;
+}
diff --git a/arch/nds32/kernel/devtree.c b/arch/nds32/kernel/devtree.c
new file mode 100644
index 000000000000..bdce0fe5af9f
--- /dev/null
+++ b/arch/nds32/kernel/devtree.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/bug.h>
+#include <linux/printk.h>
+#include <linux/of_fdt.h>
+
+void __init early_init_devtree(void *params)
+{
+ if (!params || !early_init_dt_scan(params)) {
+ pr_crit("\n"
+ "Error: invalid device tree blob at (virtual address 0x%p)\n"
+ "\nPlease check your bootloader.", params);
+
+ BUG_ON(1);
+ }
+
+ dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+}
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
new file mode 100644
index 000000000000..d291800fc621
--- /dev/null
+++ b/arch/nds32/kernel/dma.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/dma-mapping.h>
+#include <asm/proc-fns.h>
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte;
+static DEFINE_RAW_SPINLOCK(consistent_lock);
+
+enum master_type {
+ FOR_CPU = 0,
+ FOR_DEVICE = 1,
+};
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ * struct vm_struct {
+ * struct vm_region region;
+ * unsigned long flags;
+ * struct page **pages;
+ * unsigned int nr_pages;
+ * unsigned long phys_addr;
+ * };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ * struct vm_region vmalloc_head = {
+ * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
+ * .vm_start = VMALLOC_START,
+ * .vm_end = VMALLOC_END,
+ * };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.) I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct arch_vm_region {
+ struct list_head vm_list;
+ unsigned long vm_start;
+ unsigned long vm_end;
+ struct page *vm_pages;
+};
+
+static struct arch_vm_region consistent_head = {
+ .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
+ .vm_start = CONSISTENT_BASE,
+ .vm_end = CONSISTENT_END,
+};
+
+static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head,
+ size_t size, gfp_t gfp)
+{
+ unsigned long addr = head->vm_start, end = head->vm_end - size;
+ unsigned long flags;
+ struct arch_vm_region *c, *new;
+
+ new = kmalloc(sizeof(struct arch_vm_region), gfp);
+ if (!new)
+ goto out;
+
+ raw_spin_lock_irqsave(&consistent_lock, flags);
+
+ list_for_each_entry(c, &head->vm_list, vm_list) {
+ if ((addr + size) < addr)
+ goto nospc;
+ if ((addr + size) <= c->vm_start)
+ goto found;
+ addr = c->vm_end;
+ if (addr > end)
+ goto nospc;
+ }
+
+found:
+ /*
+ * Insert this entry _before_ the one we found.
+ */
+ list_add_tail(&new->vm_list, &c->vm_list);
+ new->vm_start = addr;
+ new->vm_end = addr + size;
+
+ raw_spin_unlock_irqrestore(&consistent_lock, flags);
+ return new;
+
+nospc:
+ raw_spin_unlock_irqrestore(&consistent_lock, flags);
+ kfree(new);
+out:
+ return NULL;
+}
+
+static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
+ unsigned long addr)
+{
+ struct arch_vm_region *c;
+
+ list_for_each_entry(c, &head->vm_list, vm_list) {
+ if (c->vm_start == addr)
+ goto out;
+ }
+ c = NULL;
+out:
+ return c;
+}
+
+/* FIXME: attrs is not used. */
+static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct page *page;
+ struct arch_vm_region *c;
+ unsigned long order;
+ u64 mask = ~0ULL, limit;
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+
+ if (!consistent_pte) {
+ pr_err("%s: not initialized\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ if (dev) {
+ mask = dev->coherent_dma_mask;
+
+ /*
+ * Sanity check the DMA mask - it must be non-zero, and
+ * must be able to be satisfied by a DMA allocation.
+ */
+ if (mask == 0) {
+ dev_warn(dev, "coherent DMA mask is unset\n");
+ goto no_page;
+ }
+
+ }
+
+ /*
+ * Sanity check the allocation size.
+ */
+ size = PAGE_ALIGN(size);
+ limit = (mask + 1) & ~mask;
+ if ((limit && size >= limit) ||
+ size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+ pr_warn("coherent allocation too big "
+ "(requested %#x mask %#llx)\n", size, mask);
+ goto no_page;
+ }
+
+ order = get_order(size);
+
+ if (mask != 0xffffffff)
+ gfp |= GFP_DMA;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ goto no_page;
+
+ /*
+ * Invalidate any data that might be lurking in the
+ * kernel direct-mapped region for device DMA.
+ */
+ {
+ unsigned long kaddr = (unsigned long)page_address(page);
+ memset(page_address(page), 0, size);
+ cpu_dma_wbinval_range(kaddr, kaddr + size);
+ }
+
+ /*
+ * Allocate a virtual address in the consistent mapping region.
+ */
+ c = vm_region_alloc(&consistent_head, size,
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+ if (c) {
+ pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+ struct page *end = page + (1 << order);
+
+ c->vm_pages = page;
+
+ /*
+ * Set the "dma handle"
+ */
+ *handle = page_to_phys(page);
+
+ do {
+ BUG_ON(!pte_none(*pte));
+
+ /*
+ * x86 does not mark the pages reserved...
+ */
+ SetPageReserved(page);
+ set_pte(pte, mk_pte(page, prot));
+ page++;
+ pte++;
+ } while (size -= PAGE_SIZE);
+
+ /*
+ * Free the otherwise unused pages.
+ */
+ while (page < end) {
+ __free_page(page);
+ page++;
+ }
+
+ return (void *)c->vm_start;
+ }
+
+ if (page)
+ __free_pages(page, order);
+no_page:
+ *handle = ~0;
+ return NULL;
+}
+
+static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs)
+{
+ struct arch_vm_region *c;
+ unsigned long flags, addr;
+ pte_t *ptep;
+
+ size = PAGE_ALIGN(size);
+
+ raw_spin_lock_irqsave(&consistent_lock, flags);
+
+ c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+ if (!c)
+ goto no_area;
+
+ if ((c->vm_end - c->vm_start) != size) {
+ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+ addr = c->vm_start;
+ do {
+ pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+ unsigned long pfn;
+
+ ptep++;
+ addr += PAGE_SIZE;
+
+ if (!pte_none(pte) && pte_present(pte)) {
+ pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ /*
+ * x86 does not mark the pages reserved...
+ */
+ ClearPageReserved(page);
+
+ __free_page(page);
+ continue;
+ }
+ }
+
+ pr_crit("%s: bad page in kernel page table\n", __func__);
+ } while (size -= PAGE_SIZE);
+
+ flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+ list_del(&c->vm_list);
+
+ raw_spin_unlock_irqrestore(&consistent_lock, flags);
+
+ kfree(c);
+ return;
+
+no_area:
+ raw_spin_unlock_irqrestore(&consistent_lock, flags);
+ pr_err("%s: trying to free invalid coherent area: %p\n",
+ __func__, cpu_addr);
+ dump_stack();
+}
+
+/*
+ * Initialise the consistent memory allocation.
+ */
+static int __init consistent_init(void)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int ret = 0;
+
+ do {
+ pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
+ pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+ if (!pmd) {
+ pr_err("%s: no pmd tables\n", __func__);
+ ret = -ENOMEM;
+ break;
+ }
+ /* The first-level mapping may already have been created
+ * elsewhere, so there is no need to warn here. */
+ /* WARN_ON(!pmd_none(*pmd)); */
+
+ pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+ if (!pte) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ consistent_pte = pte;
+ } while (0);
+
+ return ret;
+}
+
+core_initcall(consistent_init);
+static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
+static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
+ return page_to_phys(page) + offset;
+}
+
+static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
+}
+
+/*
+ * Make an area consistent for devices.
+ */
+static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
+{
+ unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
+
+ if (master_type == FOR_CPU) {
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ cpu_dma_inval_range(start, end);
+ break;
+ default:
+ BUG();
+ }
+ } else {
+ /* FOR_DEVICE */
+ switch (direction) {
+ case DMA_FROM_DEVICE:
+ break;
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ cpu_dma_wb_range(start, end);
+ break;
+ default:
+ BUG();
+ }
+ }
+}
+
+static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int i;
+
+ for (i = 0; i < nents; i++, sg++) {
+ void *virt;
+ unsigned long pfn;
+ struct page *page = sg_page(sg);
+
+ sg->dma_address = sg_phys(sg);
+ pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
+ page = pfn_to_page(pfn);
+ if (PageHighMem(page)) {
+ virt = kmap_atomic(page);
+ consistent_sync(virt, sg->length, dir, FOR_CPU);
+ kunmap_atomic(virt);
+ } else {
+ if (sg->offset > PAGE_SIZE)
+ panic("sg->offset:%08x > PAGE_SIZE\n",
+ sg->offset);
+ virt = page_address(page) + sg->offset;
+ consistent_sync(virt, sg->length, dir, FOR_CPU);
+ }
+ }
+ return nents;
+}
+
+static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+
+static void
+nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
+}
+
+static void
+nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
+}
+
+static void
+nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++, sg++) {
+ char *virt =
+ page_address(sg_page(sg)) + sg->offset;
+ consistent_sync(virt, sg->length, dir, FOR_CPU);
+ }
+}
+
+static void
+nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++, sg++) {
+ char *virt =
+ page_address(sg_page(sg)) + sg->offset;
+ consistent_sync(virt, sg->length, dir, FOR_DEVICE);
+ }
+}
+
+struct dma_map_ops nds32_dma_ops = {
+ .alloc = nds32_dma_alloc_coherent,
+ .free = nds32_dma_free,
+ .map_page = nds32_dma_map_page,
+ .unmap_page = nds32_dma_unmap_page,
+ .map_sg = nds32_dma_map_sg,
+ .unmap_sg = nds32_dma_unmap_sg,
+ .sync_single_for_device = nds32_dma_sync_single_for_device,
+ .sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = nds32_dma_sync_sg_for_device,
+};
+
+EXPORT_SYMBOL(nds32_dma_ops);
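
Because nds32 has no cache-coherent DMA, the map/unmap hooks above keep
the CPU cache and memory consistent by hand: the cache is written back
before the device reads (DMA_TO_DEVICE) and invalidated before the CPU
reads what the device wrote (DMA_FROM_DEVICE). A minimal sketch of a
driver transmit path that would exercise these ops through the generic
DMA API ("example_tx", "dev" and "buf" are hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* ends up in nds32_dma_map_page(): write back, then map */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... program the device with "handle", wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
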
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
new file mode 100644
index 000000000000..a72e83d804f5
--- /dev/null
+++ b/arch/nds32/kernel/ex-entry.S
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+#include <asm/nds32.h>
+#include <asm/errno.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_HWZOL
+ .macro push_zol
+ mfusr $r14, $LB
+ mfusr $r15, $LE
+ mfusr $r16, $LC
+ .endm
+#endif
+
+ .macro save_user_regs
+
+ smw.adm $sp, [$sp], $sp, #0x1
+ /* move $SP to the bottom of pt_regs */
+ addi $sp, $sp, -OSP_OFFSET
+
+ /* push $r0 ~ $r25 */
+ smw.bim $r0, [$sp], $r25
+ /* push $fp, $gp, $lp */
+ smw.bim $sp, [$sp], $sp, #0xe
+
+ mfsr $r12, $SP_USR
+ mfsr $r13, $IPC
+#ifdef CONFIG_HWZOL
+ push_zol
+#endif
+ movi $r17, -1
+ move $r18, $r0
+ mfsr $r19, $PSW
+ mfsr $r20, $IPSW
+ mfsr $r21, $P_IPSW
+ mfsr $r22, $P_IPC
+ mfsr $r23, $P_P0
+ mfsr $r24, $P_P1
+ smw.bim $r12, [$sp], $r24, #0
+ addi $sp, $sp, -FUCOP_CTL_OFFSET
+
+ /* Initialize kernel space $fp */
+ andi $p0, $r20, #PSW_mskPOM
+ movi $p1, #0x0
+ cmovz $fp, $p1, $p0
+
+ andi $r16, $r19, #PSW_mskINTL
+ slti $r17, $r16, #4
+ bnez $r17, 1f
+ addi $r17, $r19, #-2
+ mtsr $r17, $PSW
+ isb
+1:
+ /* If it was superuser mode, we don't need to update $r25 */
+ bnez $p0, 2f
+ la $p0, __entry_task
+ lw $r25, [$p0]
+2:
+ .endm
+
+ .text
+
+/*
+ * Exception Vector
+ */
+exception_handlers:
+ .long unhandled_exceptions !Reset/NMI
+ .long unhandled_exceptions !TLB fill
+ .long do_page_fault !PTE not present
+ .long do_dispatch_tlb_misc !TLB misc
+ .long unhandled_exceptions !TLB VLPT
+ .long unhandled_exceptions !Machine Error
+ .long do_debug_trap !Debug related
+ .long do_dispatch_general !General exception
+ .long eh_syscall !Syscall
+ .long asm_do_IRQ !IRQ
+
+common_exception_handler:
+ save_user_regs
+ mfsr $p0, $ITYPE
+ andi $p0, $p0, #ITYPE_mskVECTOR
+ srli $p0, $p0, #ITYPE_offVECTOR
+ andi $p1, $p0, #NDS32_VECTOR_mskNONEXCEPTION
+ bnez $p1, 1f
+ sethi $lp, hi20(ret_from_exception)
+ ori $lp, $lp, lo12(ret_from_exception)
+ sethi $p1, hi20(exception_handlers)
+ ori $p1, $p1, lo12(exception_handlers)
+ lw $p1, [$p1+$p0<<2]
+ move $r0, $p0
+ mfsr $r1, $EVA
+ mfsr $r2, $ITYPE
+ move $r3, $sp
+ mfsr $r4, $OIPC
+ /* enable gie if it is enabled in IPSW. */
+ mfsr $r21, $PSW
+ andi $r20, $r20, #PSW_mskGIE /* $r20 is $IPSW */
+ or $r21, $r21, $r20
+ mtsr $r21, $PSW
+ dsb
+ jr $p1
+
+ /* syscall */
+1:
+ addi $p1, $p0, #-NDS32_VECTOR_offEXCEPTION
+ bnez $p1, 2f
+ sethi $lp, hi20(ret_from_exception)
+ ori $lp, $lp, lo12(ret_from_exception)
+ sethi $p1, hi20(exception_handlers)
+ ori $p1, $p1, lo12(exception_handlers)
+ lwi $p1, [$p1+#NDS32_VECTOR_offEXCEPTION<<2]
+ jr $p1
+
+ /* interrupt */
+2:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ jal arch_trace_hardirqs_off
+#endif
+ move $r0, $sp
+ sethi $lp, hi20(ret_from_intr)
+ ori $lp, $lp, lo12(ret_from_intr)
+ sethi $p0, hi20(exception_handlers)
+ ori $p0, $p0, lo12(exception_handlers)
+ lwi $p0, [$p0+#NDS32_VECTOR_offINTERRUPT<<2]
+ jr $p0
+
+ .macro EXCEPTION_VECTOR_DEBUG
+ .align 4
+ mfsr $p0, $EDM_CTL
+ andi $p0, $p0, EDM_CTL_mskV3_EDM_MODE
+ tnez $p0, SWID_RAISE_INTERRUPT_LEVEL
+ .endm
+
+ .macro EXCEPTION_VECTOR
+ .align 4
+ sethi $p0, hi20(common_exception_handler)
+ ori $p0, $p0, lo12(common_exception_handler)
+ jral.ton $p0, $p0
+ .endm
+
+ .section ".text.init", #alloc, #execinstr
+ .global exception_vector
+exception_vector:
+.rept 6
+ EXCEPTION_VECTOR
+.endr
+ EXCEPTION_VECTOR_DEBUG
+.rept 121
+ EXCEPTION_VECTOR
+.endr
+ .align 4
+ .global exception_vector_end
+exception_vector_end:
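
common_exception_handler above selects a handler by extracting the
VECTOR field from $ITYPE and indexing exception_handlers. The same
dispatch restated in C; the mask/offset values are placeholders, not
the real asm/nds32.h encodings:

    /* placeholder encodings; the real ones live in asm/nds32.h */
    #define ITYPE_mskVECTOR 0x01f0
    #define ITYPE_offVECTOR 4

    typedef void (*exception_handler_t)(void);

    static exception_handler_t
    pick_handler(const exception_handler_t table[], unsigned long itype)
    {
            /* mirrors the andi/srli pair, then the lw [$p1+$p0<<2] load */
            return table[(itype & ITYPE_mskVECTOR) >> ITYPE_offVECTOR];
    }
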
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
new file mode 100644
index 000000000000..03e4f7788a18
--- /dev/null
+++ b/arch/nds32/kernel/ex-exit.S
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/nds32.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/current.h>
+
+
+#ifdef CONFIG_HWZOL
+ .macro pop_zol
+ mtusr $r14, $LB
+ mtusr $r15, $LE
+ mtusr $r16, $LC
+ .endm
+#endif
+
+ .macro restore_user_regs_first
+ setgie.d
+ isb
+
+ addi $sp, $sp, FUCOP_CTL_OFFSET
+
+ lmw.adm $r12, [$sp], $r24, #0x0
+ mtsr $r12, $SP_USR
+ mtsr $r13, $IPC
+#ifdef CONFIG_HWZOL
+ pop_zol
+#endif
+ mtsr $r19, $PSW
+ mtsr $r20, $IPSW
+ mtsr $r21, $P_IPSW
+ mtsr $r22, $P_IPC
+ mtsr $r23, $P_P0
+ mtsr $r24, $P_P1
+ lmw.adm $sp, [$sp], $sp, #0xe
+ .endm
+
+ .macro restore_user_regs_last
+ pop $p0
+ cmovn $sp, $p0, $p0
+
+ iret
+ nop
+
+ .endm
+
+ .macro restore_user_regs
+ restore_user_regs_first
+ lmw.adm $r0, [$sp], $r25, #0x0
+ addi $sp, $sp, OSP_OFFSET
+ restore_user_regs_last
+ .endm
+
+ .macro fast_restore_user_regs
+ restore_user_regs_first
+ lmw.adm $r1, [$sp], $r25, #0x0
+ addi $sp, $sp, OSP_OFFSET-4
+ restore_user_regs_last
+ .endm
+
+#ifdef CONFIG_PREEMPT
+ .macro preempt_stop
+ .endm
+#else
+ .macro preempt_stop
+ setgie.d
+ isb
+ .endm
+#define resume_kernel no_work_pending
+#endif
+
+ENTRY(ret_from_exception)
+ preempt_stop
+ENTRY(ret_from_intr)
+
+/*
+ * Determine whether we are returning to kernel or user mode.
+ */
+ lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
+ andi $p0, $p0, #PSW_mskINTL
+ bnez $p0, resume_kernel ! done with iret
+ j resume_userspace
+
+
+/*
+ * This is the fast syscall return path. We do as little as
+ * possible here; $r0 is only saved back onto the stack if we
+ * have to take the slow path.
+ * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8
+ */
+ENTRY(ret_fast_syscall)
+ gie_disable
+ lwi $r1, [tsk+#TSK_TI_FLAGS]
+ andi $p1, $r1, #_TIF_WORK_MASK
+ bnez $p1, fast_work_pending
+ fast_restore_user_regs ! iret
+
+/*
+ * Ok, we need to do extra processing: enter the slow syscall
+ * return path while work is pending.
+ */
+fast_work_pending:
+ swi $r0, [$sp+(#R0_OFFSET)] ! this is what differs from ret_from_exception
+work_pending:
+ andi $p1, $r1, #_TIF_NEED_RESCHED
+ bnez $p1, work_resched
+
+ andi $p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
+ beqz $p1, no_work_pending
+
+ move $r0, $sp ! 'regs'
+ gie_enable
+ bal do_notify_resume
+ b ret_slow_syscall
+work_resched:
+ bal schedule ! path, return to user mode
+
+/*
+ * "slow" syscall return path.
+ */
+ENTRY(resume_userspace)
+ENTRY(ret_slow_syscall)
+ gie_disable
+ lwi $p0, [$sp+(#IPSW_OFFSET)] ! Check if in nested interrupt
+ andi $p0, $p0, #PSW_mskINTL
+ bnez $p0, no_work_pending ! done with iret
+ lwi $r1, [tsk+#TSK_TI_FLAGS]
+ andi $p1, $r1, #_TIF_WORK_MASK
+ bnez $p1, work_pending ! handle work_resched, sig_pend
+
+no_work_pending:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lwi $p0, [$sp+(#IPSW_OFFSET)]
+ andi $p0, $p0, #0x1
+ la $r10, trace_hardirqs_off
+ la $r9, trace_hardirqs_on
+ cmovz $r9, $p0, $r10
+ jral $r9
+#endif
+ restore_user_regs ! return from iret
+
+
+/*
+ * preemptive kernel
+ */
+#ifdef CONFIG_PREEMPT
+resume_kernel:
+ gie_disable
+ lwi $t0, [tsk+#TSK_TI_PREEMPT]
+ bnez $t0, no_work_pending
+need_resched:
+ lwi $t0, [tsk+#TSK_TI_FLAGS]
+ andi $p1, $t0, #_TIF_NEED_RESCHED
+ beqz $p1, no_work_pending
+
+ lwi $t0, [$sp+(#IPSW_OFFSET)] ! Interrupts off?
+ andi $t0, $t0, #1
+ beqz $t0, no_work_pending
+
+ jal preempt_schedule_irq
+ b need_resched
+#endif
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ bal schedule_tail
+ beqz $r6, 1f ! r6 stores fn for kernel thread
+ move $r0, $r7 ! prepare kernel thread arg
+ jral $r6
+1:
+ lwi $r1, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing
+ andi $p1, $r1, #_TIF_WORK_SYSCALL_LEAVE ! are we tracing syscalls?
+ beqz $p1, ret_slow_syscall
+ move $r0, $sp
+ bal syscall_trace_leave
+ b ret_slow_syscall
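
Both return paths above gate on the thread-info flags: any bit set in
_TIF_WORK_MASK forces the slow path (signal delivery or reschedule)
before the final iret. The check restated in C, with placeholder bit
values standing in for asm/thread_info.h:

    /* placeholder TIF bits; see asm/thread_info.h for the real values */
    #define _TIF_SIGPENDING    (1UL << 0)
    #define _TIF_NEED_RESCHED  (1UL << 1)
    #define _TIF_NOTIFY_RESUME (1UL << 2)
    #define _TIF_WORK_MASK \
            (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)

    /* any pending work means the fast iret cannot be taken */
    static int need_slow_return(unsigned long ti_flags)
    {
            return (ti_flags & _TIF_WORK_MASK) != 0;
    }
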
diff --git a/arch/nds32/kernel/ex-scall.S b/arch/nds32/kernel/ex-scall.S
new file mode 100644
index 000000000000..36aa87ecdabd
--- /dev/null
+++ b/arch/nds32/kernel/ex-scall.S
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/nds32.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/current.h>
+
+/*
+ * $r0 = previous task_struct,
+ * $r1 = next task_struct,
+ * previous and next are guaranteed not to be the same.
+ */
+
+ENTRY(__switch_to)
+
+ la $p0, __entry_task
+ sw $r1, [$p0]
+ move $p1, $r0
+ addi $p1, $p1, #THREAD_CPU_CONTEXT
+ smw.bi $r6, [$p1], $r14, #0xb ! push r6~r14, fp, lp, sp
+ move $r25, $r1
+ addi $r1, $r1, #THREAD_CPU_CONTEXT
+ lmw.bi $r6, [$r1], $r14, #0xb ! pop r6~r14, fp, lp, sp
+ ret
+
+
+#define tbl $r8
+
+/*
+ * $r7 will be written as the syscall number
+ */
+ .macro get_scno
+ lwi $r7, [$sp + R15_OFFSET]
+ swi $r7, [$sp + SYSCALLNO_OFFSET]
+ .endm
+
+ .macro updateipc
+ addi $r17, $r13, #4 ! $r13 is $IPC
+ swi $r17, [$sp + IPC_OFFSET]
+ .endm
+
+ENTRY(eh_syscall)
+ updateipc
+
+ get_scno
+ gie_enable
+
+ lwi $p0, [tsk+#TSK_TI_FLAGS] ! check for syscall tracing
+
+ andi $p1, $p0, #_TIF_WORK_SYSCALL_ENTRY ! are we tracing syscalls?
+ bnez $p1, __sys_trace
+
+ la $lp, ret_fast_syscall ! return address
+jmp_systbl:
+ addi $p1, $r7, #-__NR_syscalls ! the syscall number encoded in the syscall instruction is guarded by the assembler
+ bgez $p1, _SCNO_EXCEED ! call sys_* routine
+ la tbl, sys_call_table ! load syscall table pointer
+ slli $p1, $r7, #2
+ add $p1, tbl, $p1
+ lwi $p1, [$p1]
+ jr $p1 ! no return
+
+_SCNO_EXCEED:
+ ori $r0, $r7, #0
+ ori $r1, $sp, #0
+ b bad_syscall
+
+/*
+ * This is the really slow path. We're going to be doing
+ * context switches, and waiting for our parent to respond.
+ */
+__sys_trace:
+ move $r0, $sp
+ bal syscall_trace_enter
+ move $r7, $r0
+ la $lp, __sys_trace_return ! return address
+
+ addi $p1, $r7, #1
+ beqz $p1, ret_slow_syscall ! fatal signal is pending
+
+ addi $p1, $sp, #R0_OFFSET ! pointer to regs
+ lmw.bi $r0, [$p1], $r5 ! have to reload $r0 - $r5
+ b jmp_systbl
+
+__sys_trace_return:
+ swi $r0, [$sp+#R0_OFFSET] ! T: save returned $r0
+ move $r0, $sp ! set pt_regs for syscall_trace_leave
+ bal syscall_trace_leave
+ b ret_slow_syscall
+
+ENTRY(sys_rt_sigreturn_wrapper)
+ addi $r0, $sp, #0
+ b sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
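
jmp_systbl above is a guarded table dispatch: numbers at or beyond
__NR_syscalls branch to bad_syscall, everything else jumps through
sys_call_table[nr]. A C restatement (the function and type names here
are illustrative):

    #include <errno.h>

    typedef long (*syscall_fn_t)(long, long, long, long, long, long);

    static long dispatch_syscall(const syscall_fn_t table[],
                                 unsigned int nr, unsigned int nr_syscalls,
                                 long a0, long a1, long a2,
                                 long a3, long a4, long a5)
    {
            if (nr >= nr_syscalls)          /* _SCNO_EXCEED -> bad_syscall */
                    return -ENOSYS;

            return table[nr](a0, a1, a2, a3, a4, a5);
    }
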
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
new file mode 100644
index 000000000000..71f57bd70f3b
--- /dev/null
+++ b/arch/nds32/kernel/head.S
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sizes.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define OF_DT_MAGIC 0xd00dfeed
+#else
+#define OF_DT_MAGIC 0xedfe0dd0
+#endif
+
+ .globl swapper_pg_dir
+ .equ swapper_pg_dir, TEXTADDR - 0x4000
+
+/*
+ * Kernel startup entry point.
+ */
+ .section ".head.text", "ax"
+ .type _stext, %function
+ENTRY(_stext)
+ setgie.d ! Disable interrupt
+ isb
+/*
+ * Disable I/D-cache and enable it at a proper time
+ */
+ mfsr $r0, $mr8
+ li $r1, #~(CACHE_CTL_mskIC_EN|CACHE_CTL_mskDC_EN)
+ and $r0, $r0, $r1
+ mtsr $r0, $mr8
+
+/*
+ * Process device tree blob
+ */
+ andi $r0, $r2, #0x3
+ li $r10, 0
+ bne $r0, $r10, _nodtb
+ lwi $r0, [$r2]
+ li $r1, OF_DT_MAGIC
+ bne $r0, $r1, _nodtb
+ move $r10, $r2
+_nodtb:
+
+/*
+ * Create a temporary mapping area for booting, before start_kernel
+ */
+ sethi $r4, hi20(swapper_pg_dir)
+ li $p0, (PAGE_OFFSET - PHYS_OFFSET)
+ sub $r4, $r4, $p0
+ tlbop FlushAll ! invalidate TLB
+ isb
+ mtsr $r4, $L1_PPTB ! load page table pointer
+
+/* set NTC0 cacheable/writeback, multiple page sizes in use */
+ mfsr $r3, $MMU_CTL
+ li $r0, #~MMU_CTL_mskNTC0
+ and $r3, $r3, $r0
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+ ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0))
+#else
+ ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB)
+#endif
+#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
+ li $r0, #MMU_CTL_UNA
+ or $r3, $r3, $r0
+#endif
+ mtsr $r3, $MMU_CTL
+ isb
+
+/* set page size and size of kernel image */
+ mfsr $r0, $MMU_CFG
+ srli $r3, $r0, MMU_CFG_offfEPSZ
+ zeb $r3, $r3
+ bnez $r3, _extra_page_size_support
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+ li $r5, #SZ_4K ! Use 4KB page size
+#else
+ li $r5, #SZ_8K ! Use 8KB page size
+ li $r3, #1
+#endif
+ mtsr $r3, $TLB_MISC
+ b _image_size_check
+
+_extra_page_size_support: ! Use epzs pages size
+ clz $r6, $r3
+ subri $r2, $r6, #31
+ li $r3, #1
+ sll $r3, $r3, $r2
+ /* MMU_CFG.EPSZ value -> meaning */
+ mul $r5, $r3, $r3
+ slli $r5, $r5, #14
+ /* MMU_CFG.EPSZ -> TLB_MISC.ACC_PSZ */
+ addi $r3, $r2, #0x2
+ mtsr $r3, $TLB_MISC
+
+_image_size_check:
+ /* calculate the image maximum size accepted by TLB config */
+ andi $r6, $r0, MMU_CFG_mskTBW
+ andi $r0, $r0, MMU_CFG_mskTBS
+ srli $r6, $r6, MMU_CFG_offTBW
+ srli $r0, $r0, MMU_CFG_offTBS
+ /*
+ * Map the kernel with at most (ways - 1) of the TLB and
+ * reserve one way for the UART VA mapping; a page fault
+ * would occur if the UART mapping covered the kernel mapping.
+ *
+ * Direct mapping is not supported yet.
+ */
+ li $r2, 't'
+ beqz $r6, __error ! MMU_CFG.TBW = 0 is direct mapping
+ addi $r0, $r0, #0x2 ! MMU_CFG.TBS value -> meaning
+ sll $r0, $r6, $r0 ! entries = k-way * n-set
+ mul $r6, $r0, $r5 ! max size = entries * page size
+ /* check kernel image size */
+ la $r3, (_end - PAGE_OFFSET)
+ li $r2, 's'
+ bgt $r3, $r6, __error
+
+ li $r2, #(PHYS_OFFSET + TLB_DATA_kernel_text_attr)
+ li $r3, PAGE_OFFSET
+ add $r6, $r6, $r3
+
+_tlb:
+ mtsr $r3, $TLB_VPN
+ dsb
+ tlbop $r2, RWR
+ isb
+ add $r3, $r3, $r5
+ add $r2, $r2, $r5
+ bgt $r6, $r3, _tlb
+ mfsr $r3, $TLB_MISC ! setup access page size
+ li $r2, #~0xf
+ and $r3, $r3, $r2
+#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
+ ori $r3, $r3, #0x1
+#endif
+ mtsr $r3, $TLB_MISC
+
+ mfsr $r0, $MISC_CTL ! Enable BTB and RTP and shadow sp
+ ori $r0, $r0, #MISC_init
+ mtsr $r0, $MISC_CTL
+
+ mfsr $p1, $PSW
+ li $r15, #~PSW_clr ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE
+ and $p1, $p1, $r15
+ ori $p1, $p1, #PSW_init
+ mtsr $p1, $IPSW ! when iret, it will automatically enable MMU
+ la $lp, __mmap_switched
+ mtsr $lp, $IPC
+ iret
+ nop
+
+ .type __switch_data, %object
+__switch_data:
+ .long __bss_start ! $r6
+ .long _end ! $r7
+ .long __atags_pointer ! $atag_pointer
+ .long init_task ! $r9, move to $r25
+ .long init_thread_union + THREAD_SIZE ! $sp
+
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ */
+ .align
+ .type __mmap_switched, %function
+__mmap_switched:
+ la $r3, __switch_data
+ lmw.bim $r6, [$r3], $r9, #0b0001
+ move $r25, $r9
+ move $fp, #0 ! Clear BSS (and zero $fp)
+ beq $r7, $r6, _RRT
+1: swi.bi $fp, [$r6], #4
+ bne $r7, $r6, 1b
+ swi $r10, [$r8]
+
+_RRT:
+ b start_kernel
+
+__error:
+ b __error
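
_extra_page_size_support above derives the boot page size from the
highest set bit k of MMU_CFG.EPSZ: clz/subri compute k, and the
mul + slli pair yields 4^k * 16KB, while TLB_MISC.ACC_PSZ is set to
k + 2. The same arithmetic in C; the interpretation follows the
assembly above and should be treated as illustrative:

    /* epsz must be non-zero; the assembly branches away when it is 0 */
    static unsigned long epsz_to_page_size(unsigned int epsz)
    {
            int k = 31 - __builtin_clz(epsz); /* highest set bit of EPSZ */

            return (1UL << (2 * k)) << 14;    /* 4^k * 16KB */
    }

    static unsigned int epsz_to_acc_psz(unsigned int epsz)
    {
            return (31 - __builtin_clz(epsz)) + 2; /* TLB_MISC.ACC_PSZ */
    }
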
diff --git a/arch/nds32/kernel/irq.c b/arch/nds32/kernel/irq.c
new file mode 100644
index 000000000000..6ff5a672be27
--- /dev/null
+++ b/arch/nds32/kernel/irq.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/irqchip.h>
+
+void __init init_IRQ(void)
+{
+ irqchip_init();
+}
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
new file mode 100644
index 000000000000..4167283d8293
--- /dev/null
+++ b/arch/nds32/kernel/module.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleloader.h>
+#include <asm/pgtable.h>
+
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+
+void module_free(struct module *module, void *region)
+{
+ vfree(region);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ return 0;
+}
+
+void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
+ unsigned int val_shift, unsigned int loc_mask,
+ unsigned int partial_in_place, unsigned int swap)
+{
+ unsigned int tmp = 0, tmp2 = 0;
+
+ __asm__ __volatile__("\tlhi.bi\t%0, [%2], 0\n"
+ "\tbeqz\t%3, 1f\n"
+ "\twsbh\t%0, %1\n"
+ "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap)
+ );
+
+ tmp2 = tmp & loc_mask;
+ if (partial_in_place) {
+ tmp &= (!loc_mask);
+ tmp =
+ tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
+ } else {
+ tmp = tmp2 | ((val & val_mask) >> val_shift);
+ }
+
+ __asm__ __volatile__("\tbeqz\t%3, 2f\n"
+ "\twsbh\t%0, %1\n"
+ "2:\n"
+ "\tshi.bi\t%0, [%2], 0\n":"=r"(tmp):"0"(tmp),
+ "r"(loc), "r"(swap)
+ );
+}
+
+void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
+ unsigned int val_shift, unsigned int loc_mask,
+ unsigned int partial_in_place, unsigned int swap)
+{
+ unsigned int tmp = 0, tmp2 = 0;
+
+ __asm__ __volatile__("\tlmw.bi\t%0, [%2], %0, 0\n"
+ "\tbeqz\t%3, 1f\n"
+ "\twsbh\t%0, %1\n"
+ "\trotri\t%0, %1, 16\n"
+ "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap)
+ );
+
+ tmp2 = tmp & loc_mask;
+ if (partial_in_place) {
+ tmp &= (!loc_mask);
+ tmp =
+ tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
+ } else {
+ tmp = tmp2 | ((val & val_mask) >> val_shift);
+ }
+
+ __asm__ __volatile__("\tbeqz\t%3, 2f\n"
+ "\twsbh\t%0, %1\n"
+ "\trotri\t%0, %1, 16\n"
+ "2:\n"
+ "\tsmw.bi\t%0, [%2], %0, 0\n":"=r"(tmp):"0"(tmp),
+ "r"(loc), "r"(swap)
+ );
+}
+
+static inline int exceed_limit(int offset, unsigned int val_mask,
+ struct module *module, Elf32_Rela *rel,
+ unsigned int relindex, unsigned int reloc_order)
+{
+ int abs_off = offset < 0 ? ~offset : offset;
+
+ if (abs_off & (~val_mask)) {
+ pr_err("\n%s: relocation type %d out of range.\n"
+ "please rebuild the kernel module with gcc option \"-Wa,-mno-small-text\".\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ pr_err("section %d reloc %d offset 0x%x relative 0x%x.\n",
+ relindex, reloc_order, rel->r_offset, offset);
+ return true;
+ }
+ return false;
+}
+
+#ifdef __NDS32_EL__
+#define NEED_SWAP 1
+#else
+#define NEED_SWAP 0
+#endif
+
+int
+apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relindex,
+ struct module *module)
+{
+ Elf32_Shdr *symsec = sechdrs + symindex;
+ Elf32_Shdr *relsec = sechdrs + relindex;
+ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+ Elf32_Rela *rel = (void *)relsec->sh_addr;
+ unsigned int i;
+
+ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) {
+ Elf32_Addr *loc;
+ Elf32_Sym *sym;
+ Elf32_Addr v;
+ s32 offset;
+
+ offset = ELF32_R_SYM(rel->r_info);
+ if (offset < 0
+ || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
+ pr_err("%s: bad relocation\n", module->name);
+ pr_err("section %d reloc %d\n", relindex, i);
+ return -ENOEXEC;
+ }
+
+ sym = ((Elf32_Sym *) symsec->sh_addr) + offset;
+
+ if (rel->r_offset < 0
+ || rel->r_offset > dstsec->sh_size - sizeof(u16)) {
+ pr_err("%s: out of bounds relocation\n", module->name);
+ pr_err("section %d reloc %d offset 0x%0x size %d\n",
+ relindex, i, rel->r_offset, dstsec->sh_size);
+ return -ENOEXEC;
+ }
+
+ loc = (Elf32_Addr *) (dstsec->sh_addr + rel->r_offset);
+ v = sym->st_value + rel->r_addend;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_NDS32_NONE:
+ case R_NDS32_INSN16:
+ case R_NDS32_LABEL:
+ case R_NDS32_LONGCALL1:
+ case R_NDS32_LONGCALL2:
+ case R_NDS32_LONGCALL3:
+ case R_NDS32_LONGCALL4:
+ case R_NDS32_LONGJUMP1:
+ case R_NDS32_LONGJUMP2:
+ case R_NDS32_LONGJUMP3:
+ case R_NDS32_9_FIXED_RELA:
+ case R_NDS32_15_FIXED_RELA:
+ case R_NDS32_17_FIXED_RELA:
+ case R_NDS32_25_FIXED_RELA:
+ case R_NDS32_LOADSTORE:
+ case R_NDS32_DWARF2_OP1_RELA:
+ case R_NDS32_DWARF2_OP2_RELA:
+ case R_NDS32_DWARF2_LEB_RELA:
+ case R_NDS32_RELA_NOP_MIX ... R_NDS32_RELA_NOP_MAX:
+ break;
+
+ case R_NDS32_32_RELA:
+ do_reloc32(v, loc, 0xffffffff, 0, 0, 0, 0);
+ break;
+
+ case R_NDS32_HI20_RELA:
+ do_reloc32(v, loc, 0xfffff000, 12, 0xfff00000, 0,
+ NEED_SWAP);
+ break;
+
+ case R_NDS32_LO12S3_RELA:
+ do_reloc32(v, loc, 0x00000fff, 3, 0xfffff000, 0,
+ NEED_SWAP);
+ break;
+
+ case R_NDS32_LO12S2_RELA:
+ do_reloc32(v, loc, 0x00000fff, 2, 0xfffff000, 0,
+ NEED_SWAP);
+ break;
+
+ case R_NDS32_LO12S1_RELA:
+ do_reloc32(v, loc, 0x00000fff, 1, 0xfffff000, 0,
+ NEED_SWAP);
+ break;
+
+ case R_NDS32_LO12S0_RELA:
+ case R_NDS32_LO12S0_ORI_RELA:
+ do_reloc32(v, loc, 0x00000fff, 0, 0xfffff000, 0,
+ NEED_SWAP);
+ break;
+
+ case R_NDS32_9_PCREL_RELA:
+ if (exceed_limit
+ ((v - (Elf32_Addr) loc), 0x000000ff, module, rel,
+ relindex, i))
+ return -ENOEXEC;
+ do_reloc16(v - (Elf32_Addr) loc, loc, 0x000001ff, 1,
+ 0xffffff00, 0, NEED_SWAP);
+ break;
+
+ case R_NDS32_15_PCREL_RELA:
+ if (exceed_limit
+ ((v - (Elf32_Addr) loc), 0x00003fff, module, rel,
+ relindex, i))
+ return -ENOEXEC;
+ do_reloc32(v - (Elf32_Addr) loc, loc, 0x00007fff, 1,
+ 0xffffc000, 0, NEED_SWAP);
+ break;
+
+ case R_NDS32_17_PCREL_RELA:
+ if (exceed_limit
+ ((v - (Elf32_Addr) loc), 0x0000ffff, module, rel,
+ relindex, i))
+ return -ENOEXEC;
+ do_reloc32(v - (Elf32_Addr) loc, loc, 0x0001ffff, 1,
+ 0xffff0000, 0, NEED_SWAP);
+ break;
+
+ case R_NDS32_25_PCREL_RELA:
+ if (exceed_limit
+ ((v - (Elf32_Addr) loc), 0x00ffffff, module, rel,
+ relindex, i))
+ return -ENOEXEC;
+ do_reloc32(v - (Elf32_Addr) loc, loc, 0x01ffffff, 1,
+ 0xff000000, 0, NEED_SWAP);
+ break;
+ case R_NDS32_WORD_9_PCREL_RELA:
+ if (exceed_limit
+ ((v - (Elf32_Addr) loc), 0x000000ff, module, rel,
+ relindex, i))
+ return -ENOEXEC;
+ do_reloc32(v - (Elf32_Addr) loc, loc, 0x000001ff, 1,
+ 0xffffff00, 0, NEED_SWAP);
+ break;
+
+ case R_NDS32_SDA15S3_RELA:
+ case R_NDS32_SDA15S2_RELA:
+ case R_NDS32_SDA15S1_RELA:
+ case R_NDS32_SDA15S0_RELA:
+ pr_err("%s: unsupported relocation type %d.\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ pr_err
+ ("Small data section access doesn't work in the kernel space; "
+ "please rebuild the kernel module with gcc option -mcmodel=large.\n");
+ pr_err("section %d reloc %d offset 0x%x size %d\n",
+ relindex, i, rel->r_offset, dstsec->sh_size);
+ break;
+
+ default:
+ pr_err("%s: unsupported relocation type %d.\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ pr_err("section %d reloc %d offset 0x%x size %d\n",
+ relindex, i, rel->r_offset, dstsec->sh_size);
+ }
+ }
+ return 0;
+}
+
+int
+module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *module)
+{
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
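
Every relocation case above reduces to do_reloc16/do_reloc32 with a
(val_mask, val_shift, loc_mask) triple: the symbol value is masked,
shifted down into the instruction's immediate field, and merged with
the instruction bits preserved by loc_mask. A worked HI20/LO12S2 pair
as a stand-alone example (the input value is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int v = 0x12345678; /* hypothetical st_value + r_addend */

            /* R_NDS32_HI20_RELA: (v & 0xfffff000) >> 12 */
            unsigned int hi20 = (v & 0xfffff000) >> 12;

            /* R_NDS32_LO12S2_RELA: low 12 bits, >> 2 for word accesses */
            unsigned int lo12s2 = (v & 0x00000fff) >> 2;

            printf("hi20=%#x lo12s2=%#x\n", hi20, lo12s2); /* 0x12345 0x19e */
            return 0;
    }
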
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c
new file mode 100644
index 000000000000..5ecebd0e60cb
--- /dev/null
+++ b/arch/nds32/kernel/nds32_ksyms.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/ftrace.h>
+#include <asm/proc-fns.h>
+
+/* mem functions */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memzero);
+
+/* user mem (segment) */
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
+EXPORT_SYMBOL(__arch_clear_user);
+
+/* cache handling */
+EXPORT_SYMBOL(cpu_icache_inval_all);
+EXPORT_SYMBOL(cpu_dcache_wbinval_all);
+EXPORT_SYMBOL(cpu_dma_inval_range);
+EXPORT_SYMBOL(cpu_dma_wb_range);
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
new file mode 100644
index 000000000000..65fda986e55f
--- /dev/null
+++ b/arch/nds32/kernel/process.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/delay.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <asm/elf.h>
+#include <asm/proc-fns.h>
+#include <linux/ptrace.h>
+#include <linux/reboot.h>
+
+extern void setup_mm_for_reboot(char mode);
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *proc_dir_cpu;
+EXPORT_SYMBOL(proc_dir_cpu);
+#endif
+
+static inline void arch_reset(char mode)
+{
+ if (mode == 's') {
+ /* Use cpu handler, jump to 0 */
+ cpu_reset(0);
+ }
+}
+
+void (*pm_power_off) (void);
+EXPORT_SYMBOL(pm_power_off);
+
+static char reboot_mode_nds32 = 'h';
+
+int __init reboot_setup(char *str)
+{
+ reboot_mode_nds32 = str[0];
+ return 1;
+}
+
+static int cpub_pwroff(void)
+{
+ return 0;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_halt(void)
+{
+ cpub_pwroff();
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void machine_restart(char *cmd)
+{
+ /*
+ * Clean and disable cache, and turn off interrupts
+ */
+ cpu_proc_fin();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(reboot_mode_nds32);
+
+ /* Execute kernel restart handler call chain */
+ do_kernel_restart(cmd);
+
+ /*
+ * Now call the architecture specific reboot code.
+ */
+ arch_reset(reboot_mode_nds32);
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+ pr_info("Reboot failed -- System halted\n");
+ while (1) ;
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void show_regs(struct pt_regs *regs)
+{
+ printk("PC is at %pS\n", (void *)instruction_pointer(regs));
+ printk("LP is at %pS\n", (void *)regs->lp);
+ pr_info("pc : [<%08lx>] lp : [<%08lx>] %s\n"
+ "sp : %08lx fp : %08lx gp : %08lx\n",
+ instruction_pointer(regs),
+ regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
+ pr_info("r25: %08lx r24: %08lx\n", regs->uregs[25], regs->uregs[24]);
+
+ pr_info("r23: %08lx r22: %08lx r21: %08lx r20: %08lx\n",
+ regs->uregs[23], regs->uregs[22],
+ regs->uregs[21], regs->uregs[20]);
+ pr_info("r19: %08lx r18: %08lx r17: %08lx r16: %08lx\n",
+ regs->uregs[19], regs->uregs[18],
+ regs->uregs[17], regs->uregs[16]);
+ pr_info("r15: %08lx r14: %08lx r13: %08lx r12: %08lx\n",
+ regs->uregs[15], regs->uregs[14],
+ regs->uregs[13], regs->uregs[12]);
+ pr_info("r11: %08lx r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->uregs[11], regs->uregs[10],
+ regs->uregs[9], regs->uregs[8]);
+ pr_info("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
+ pr_info("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
+ pr_info(" IRQs o%s Segment %s\n",
+ interrupts_enabled(regs) ? "n" : "ff",
+ segment_eq(get_fs(), get_ds())? "kernel" : "user");
+}
+
+EXPORT_SYMBOL(show_regs);
+
+void flush_thread(void)
+{
+}
+
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ /* kernel thread fn */
+ p->thread.cpu_context.r6 = stack_start;
+ /* kernel thread argument */
+ p->thread.cpu_context.r7 = stk_sz;
+ } else {
+ *childregs = *current_pt_regs();
+ if (stack_start)
+ childregs->sp = stack_start;
+ /* the child gets zero as its return value */
+ childregs->uregs[0] = 0;
+ childregs->osp = 0;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->uregs[25] = childregs->uregs[3];
+ }
+ /* cpu context switching */
+ p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
+ p->thread.cpu_context.sp = (unsigned long)childregs;
+
+#ifdef CONFIG_HWZOL
+ childregs->lb = 0;
+ childregs->le = 0;
+ childregs->lc = 0;
+#endif
+
+ return 0;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ int fpvalid = 0;
+ return fpvalid;
+}
+
+EXPORT_SYMBOL(dump_fpu);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, lr;
+ unsigned long stack_start, stack_end;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
+ stack_start = (unsigned long)end_of_stack(p);
+ stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+
+ fp = thread_saved_fp(p);
+ do {
+ if (fp < stack_start || fp > stack_end)
+ return 0;
+ lr = ((unsigned long *)fp)[0];
+ if (!in_sched_functions(lr))
+ return lr;
+ fp = *(unsigned long *)(fp + 4);
+ } while (count++ < 16);
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(get_wchan);
diff --git a/arch/nds32/kernel/ptrace.c b/arch/nds32/kernel/ptrace.c
new file mode 100644
index 000000000000..eaaf7a999b20
--- /dev/null
+++ b/arch/nds32/kernel/ptrace.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/sched/task_stack.h>
+
+enum nds32_regset {
+ REGSET_GPR,
+};
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err;
+ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ if (err)
+ return err;
+
+ task_pt_regs(target)->user_regs = newregs;
+ return 0;
+}
+
+static const struct user_regset nds32_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct user_pt_regs) / sizeof(u32),
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .get = gpr_get,
+ .set = gpr_set}
+};
+
+static const struct user_regset_view nds32_user_view = {
+ .name = "nds32",
+ .e_machine = EM_NDS32,
+ .regsets = nds32_regsets,
+ .n = ARRAY_SIZE(nds32_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &nds32_user_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ user_disable_single_step(child);
+}
+
+/* arch_ptrace()
+ *
+ * Provide the arch-specific part of the ptrace service.
+ */
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+ unsigned long data)
+{
+ int ret = -EIO;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs;
+ regs = task_pt_regs(child);
+ regs->ipsw |= PSW_mskHSS;
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs;
+ regs = task_pt_regs(child);
+ regs->ipsw &= ~PSW_mskHSS;
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/*
+ * Syscall tracing handlers, called on syscall entry and exit.
+ */
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (tracehook_report_syscall_entry(regs))
+ forget_syscall(regs);
+ }
+ return regs->syscallno;
+}
+
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ int step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
new file mode 100644
index 000000000000..ba910e9e4ecb
--- /dev/null
+++ b/arch/nds32/kernel/setup.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/cpu.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/proc-fns.h>
+#include <asm/cache_info.h>
+#include <asm/elf.h>
+#include <nds32_intrinsic.h>
+
+#define HWCAP_MFUSR_PC 0x000001
+#define HWCAP_EXT 0x000002
+#define HWCAP_EXT2 0x000004
+#define HWCAP_FPU 0x000008
+#define HWCAP_AUDIO 0x000010
+#define HWCAP_BASE16 0x000020
+#define HWCAP_STRING 0x000040
+#define HWCAP_REDUCED_REGS 0x000080
+#define HWCAP_VIDEO 0x000100
+#define HWCAP_ENCRYPT 0x000200
+#define HWCAP_EDM 0x000400
+#define HWCAP_LMDMA 0x000800
+#define HWCAP_PFM 0x001000
+#define HWCAP_HSMP 0x002000
+#define HWCAP_TRACE 0x004000
+#define HWCAP_DIV 0x008000
+#define HWCAP_MAC 0x010000
+#define HWCAP_L2C 0x020000
+#define HWCAP_FPU_DP 0x040000
+#define HWCAP_V2 0x080000
+#define HWCAP_DX_REGS 0x100000
+
+unsigned long cpu_id, cpu_rev, cpu_cfgid;
+char cpu_series;
+char *endianness = NULL;
+
+unsigned int __atags_pointer __initdata;
+unsigned int elf_hwcap;
+EXPORT_SYMBOL(elf_hwcap);
+
+/*
+ * The following string table must be kept in sync with the HWCAP_xx
+ * bitmask defined above.
+ */
+static const char *hwcap_str[] = {
+ "mfusr_pc",
+ "perf1",
+ "perf2",
+ "fpu",
+ "audio",
+ "16b",
+ "string",
+ "reduced_regs",
+ "video",
+ "encrypt",
+ "edm",
+ "lmdma",
+ "pfm",
+ "hsmp",
+ "trace",
+ "div",
+ "mac",
+ "l2c",
+ "dx_regs",
+ "v2",
+ NULL,
+};
+
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define WRITE_METHOD "write through"
+#else
+#define WRITE_METHOD "write back"
+#endif
+
+struct cache_info L1_cache_info[2];
+static void __init dump_cpu_info(int cpu)
+{
+ int i, p = 0;
+ char str[sizeof(hwcap_str) + 16];
+
+ for (i = 0; hwcap_str[i]; i++) {
+ if (elf_hwcap & (1 << i)) {
+ sprintf(str + p, "%s ", hwcap_str[i]);
+ p += strlen(hwcap_str[i]) + 1;
+ }
+ }
+
+ pr_info("CPU%d Features: %s\n", cpu, str);
+
+ L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
+ L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
+ L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
+ L1_cache_info[ICACHE].size =
+ L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
+ L1_cache_info[ICACHE].sets / 1024;
+ pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
+ L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
+ L1_cache_info[ICACHE].line_size);
+ L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
+ L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
+ L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
+ L1_cache_info[DCACHE].size =
+ L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
+ L1_cache_info[DCACHE].sets / 1024;
+ pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
+ L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
+ L1_cache_info[DCACHE].line_size);
+ pr_info("L1 D-Cache is %s\n", WRITE_METHOD);
+ if (L1_cache_info[DCACHE].line_size != L1_CACHE_BYTES)
+ pr_crit
+ ("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n",
+ L1_cache_info[DCACHE].line_size, L1_CACHE_BYTES);
+#ifdef CONFIG_CPU_CACHE_ALIASING
+ {
+ int aliasing_num;
+ aliasing_num =
+ L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
+ L1_cache_info[ICACHE].ways;
+ L1_cache_info[ICACHE].aliasing_num = aliasing_num;
+ L1_cache_info[ICACHE].aliasing_mask =
+ (aliasing_num - 1) << PAGE_SHIFT;
+ aliasing_num =
+ L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
+ L1_cache_info[DCACHE].ways;
+ L1_cache_info[DCACHE].aliasing_num = aliasing_num;
+ L1_cache_info[DCACHE].aliasing_mask =
+ (aliasing_num - 1) << PAGE_SHIFT;
+ }
+#endif
+}
+
+static void __init setup_cpuinfo(void)
+{
+ unsigned long tmp = 0, cpu_name;
+
+ cpu_dcache_inval_all();
+ cpu_icache_inval_all();
+ __nds32__isb();
+
+ cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
+ cpu_name = ((cpu_id) & 0xf0) >> 4;
+ cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
+ cpu_id = cpu_id & 0xf;
+ cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
+ cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;
+
+ pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
+ cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid);
+
+ elf_hwcap |= HWCAP_MFUSR_PC;
+
+ if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
+ elf_hwcap |= HWCAP_DIV;
+
+ if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
+ || (cpu_id == 12 && cpu_rev < 4))
+ elf_hwcap |= HWCAP_MAC;
+ } else {
+ elf_hwcap |= HWCAP_V2;
+ elf_hwcap |= HWCAP_DIV;
+ elf_hwcap |= HWCAP_MAC;
+ }
+
+ if (cpu_cfgid & 0x0001)
+ elf_hwcap |= HWCAP_EXT;
+
+ if (cpu_cfgid & 0x0002)
+ elf_hwcap |= HWCAP_BASE16;
+
+ if (cpu_cfgid & 0x0004)
+ elf_hwcap |= HWCAP_EXT2;
+
+ if (cpu_cfgid & 0x0008)
+ elf_hwcap |= HWCAP_FPU;
+
+ if (cpu_cfgid & 0x0010)
+ elf_hwcap |= HWCAP_STRING;
+
+ if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
+ endianness = "MSB";
+ else
+ endianness = "LSB";
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
+ elf_hwcap |= HWCAP_EDM;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
+ elf_hwcap |= HWCAP_LMDMA;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
+ elf_hwcap |= HWCAP_PFM;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
+ elf_hwcap |= HWCAP_HSMP;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
+ elf_hwcap |= HWCAP_TRACE;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
+ elf_hwcap |= HWCAP_AUDIO;
+
+ if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
+ elf_hwcap |= HWCAP_L2C;
+
+ tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
+ if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
+ tmp |= CACHE_CTL_mskDC_EN;
+
+ if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
+ tmp |= CACHE_CTL_mskIC_EN;
+ __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
+
+ dump_cpu_info(smp_processor_id());
+}
+
+static void __init setup_memory(void)
+{
+ unsigned long ram_start_pfn;
+ unsigned long free_ram_start_pfn;
+ phys_addr_t memory_start, memory_end;
+ struct memblock_region *region;
+
+ memory_end = memory_start = 0;
+
+ /* Find the main memory region that contains the kernel */
+ for_each_memblock(memory, region) {
+ memory_start = region->base;
+ memory_end = region->base + region->size;
+ pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
+ memory_start, memory_end);
+ }
+
+ if (!memory_end) {
+ panic("No memory!");
+ }
+
+ ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
+ /* free_ram_start_pfn is first page after kernel */
+ free_ram_start_pfn = PFN_UP(__pa(&_end));
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+ /* the following block may lower max_pfn */
+ if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
+ max_low_pfn = max_pfn;
+ else {
+ max_low_pfn = MAXMEM_PFN + ram_start_pfn;
+ if (!IS_ENABLED(CONFIG_HIGHMEM))
+ max_pfn = MAXMEM_PFN + ram_start_pfn;
+ }
+ /* high_memory bounds low memory; the vmalloc area starts above it */
+ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+ min_low_pfn = free_ram_start_pfn;
+
+ /*
+ * initialize the boot-time allocator (with low memory only).
+ *
+ * This makes the memory from the end of the kernel to the end of
+ * RAM usable.
+ */
+ memblock_set_bottom_up(true);
+ memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn));
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+ memblock_dump_all();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ early_init_devtree(__dtb_start);
+
+ setup_cpuinfo();
+
+ init_mm.start_code = (unsigned long)&_stext;
+ init_mm.end_code = (unsigned long)&_etext;
+ init_mm.end_data = (unsigned long)&_edata;
+ init_mm.brk = (unsigned long)&_end;
+
+ /* setup bootmem allocator */
+ setup_memory();
+
+ /* paging_init() sets up the MMU and marks all pages as reserved */
+ paging_init();
+
+ /* use generic way to parse */
+ parse_early_param();
+
+ unflatten_and_copy_device_tree();
+
+ if (IS_ENABLED(CONFIG_VT)) {
+ if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
+ conswitchp = &dummy_con;
+ }
+
+ *cmdline_p = boot_command_line;
+ early_trap_init();
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
+ cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);
+
+ seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
+ CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
+ CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
+ CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));
+
+ seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
+ CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
+ CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
+ CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: ");
+
+ for (i = 0; hwcap_str[i]; i++)
+ if (elf_hwcap & (1 << i))
+ seq_printf(m, "%s ", hwcap_str[i]);
+
+ seq_puts(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
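
The Features line in c_show() (and the CPU%d line in dump_cpu_info())
is a bit-indexed walk of hwcap_str against elf_hwcap, which is why the
table order must match the HWCAP_xx bit positions. A stand-alone sketch
with a truncated table and a made-up hwcap value:

    #include <stdio.h>

    int main(void)
    {
            static const char *hwcap_str[] = {
                    "mfusr_pc", "perf1", "perf2", NULL
            };
            unsigned int elf_hwcap = 0x5; /* bits 0 and 2 set (made up) */
            int i;

            for (i = 0; hwcap_str[i]; i++)
                    if (elf_hwcap & (1 << i))
                            printf("%s ", hwcap_str[i]);
            printf("\n"); /* prints: mfusr_pc perf2 */
            return 0;
    }
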
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
new file mode 100644
index 000000000000..5d01f6e33cb8
--- /dev/null
+++ b/arch/nds32/kernel/signal.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/unistd.h>
+
+#include <asm/ptrace.h>
+#include <asm/vdso.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static int restore_sigframe(struct pt_regs *regs,
+ struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0) {
+ set_current_blocked(&set);
+ }
+
+ __get_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
+ __get_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
+ __get_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
+ __get_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
+ __get_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
+ __get_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
+ __get_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
+ __get_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
+ __get_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
+ __get_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
+ __get_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
+ __get_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
+ __get_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
+ __get_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
+ __get_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
+ __get_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
+ __get_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
+ __get_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
+ __get_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
+ __get_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
+ __get_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
+ __get_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
+ __get_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
+ __get_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
+ __get_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
+ __get_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);
+
+ __get_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
+ __get_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
+ __get_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
+ __get_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
+ __get_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
+#if defined(CONFIG_HWZOL)
+ __get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
+ __get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
+ __get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
+#endif
+
+ /*
+ * Avoid sys_rt_sigreturn() restarting.
+ */
+ forget_syscall(regs);
+ return err;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+ * then 'sp' should be two-word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 7)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *)regs->sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (restore_sigframe(regs, frame))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->uregs[0];
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int
+setup_sigframe(struct rt_sigframe __user * sf, struct pt_regs *regs,
+ sigset_t * set)
+{
+ int err = 0;
+
+ __put_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
+ __put_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
+ __put_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
+ __put_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
+ __put_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
+ __put_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
+ __put_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
+ __put_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
+ __put_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
+ __put_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
+ __put_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
+ __put_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
+ __put_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
+ __put_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
+ __put_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
+ __put_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
+ __put_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
+ __put_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
+ __put_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
+ __put_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
+ __put_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
+
+ __put_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
+ __put_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
+ __put_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
+ __put_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
+ __put_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);
+ __put_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
+ __put_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
+ __put_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
+ __put_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
+ __put_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
+#if defined(CONFIG_HWZOL)
+ __put_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
+ __put_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
+ __put_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
+#endif
+
+ __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no,
+ err);
+ __put_user_error(current->thread.error_code,
+ &sf->uc.uc_mcontext.error_code, err);
+ __put_user_error(current->thread.address,
+ &sf->uc.uc_mcontext.fault_address, err);
+ __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, int framesize)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->sp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && unlikely(!on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = (sigsp(sp, ksig) - framesize);
+
+ /*
+ * nds32 mandates 8-byte alignment
+ */
+ sp &= ~0x7UL;
+
+ return (void __user *)sp;
+}
+
+static int
+setup_return(struct pt_regs *regs, struct ksignal *ksig, void __user * frame)
+{
+ unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+ unsigned long retcode;
+
+ retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp);
+ regs->uregs[0] = ksig->sig;
+ regs->sp = (unsigned long)frame;
+ regs->lp = retcode;
+ regs->ipc = handler;
+
+ return 0;
+}
+
+static int
+setup_rt_frame(struct ksignal *ksig, sigset_t * set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame =
+ get_sigframe(ksig, regs, sizeof(*frame));
+ int err = 0;
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ return -EFAULT;
+
+ __put_user_error(0, &frame->uc.uc_flags, err);
+ __put_user_error(NULL, &frame->uc.uc_link, err);
+
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0) {
+ setup_return(regs, ksig, frame);
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+ regs->uregs[1] = (unsigned long)&frame->info;
+ regs->uregs[2] = (unsigned long)&frame->uc;
+ }
+ }
+ return err;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+
+ if (in_syscall(regs)) {
+ /* Avoid additional syscall restarting via ret_slow_syscall. */
+ forget_syscall(regs);
+
+ switch (regs->uregs[0]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->uregs[0] = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->uregs[0] = -EINTR;
+ break;
+ }
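+ /* Fall through to restart the syscall */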
+ case -ERESTARTNOINTR:
+ regs->uregs[0] = regs->orig_r0;
+ regs->ipc -= 4;
+ break;
+ }
+ }
+ /*
+ * Set up the stack frame
+ */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (in_syscall(regs)) {
+ /* Restart the system call - no handlers present */
+
+ /* Avoid additional syscall restarting via ret_slow_syscall. */
+ forget_syscall(regs);
+
+ switch (regs->uregs[0]) {
+ case -ERESTART_RESTARTBLOCK:
+ regs->uregs[15] = __NR_restart_syscall;
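+ /* Fall through */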
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->uregs[0] = regs->orig_r0;
+ regs->ipc -= 0x4;
+ break;
+ }
+ }
+ restore_saved_sigmask();
+}
+
+asmlinkage void
+do_notify_resume(struct pt_regs *regs, unsigned int thread_flags)
+{
+ if (thread_flags & _TIF_SIGPENDING)
+ do_signal(regs);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+}
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
new file mode 100644
index 000000000000..bc70113c0e84
--- /dev/null
+++ b/arch/nds32/kernel/stacktrace.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned long *fpn;
+ int skip = trace->skip;
+ int savesched;
+
+ if (tsk == current) {
+ __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
+ savesched = 1;
+ } else {
+ fpn = (unsigned long *)thread_saved_fp(tsk);
+ savesched = 0;
+ }
+
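+ /*
+ * Walk the frame chain: the saved return address sits at fp[-1]
+ * and the caller's frame pointer at fp[FP_OFFSET].
+ */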
+ while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3)
+ && (fpn >= (unsigned long *)TASK_SIZE)) {
+ unsigned long lpp, fpp;
+
+ lpp = fpn[-1];
+ fpp = fpn[FP_OFFSET];
+ if (!__kernel_text_address(lpp))
+ break;
+
+ if (savesched || !in_sched_functions(lpp)) {
+ if (skip) {
+ skip--;
+ } else {
+ trace->entries[trace->nr_entries++] = lpp;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+ fpn = (unsigned long *)fpp;
+ }
+}
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
new file mode 100644
index 000000000000..9de93ab4c52b
--- /dev/null
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/cachectl.h>
+#include <asm/proc-fns.h>
+
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, pgoff)
+{
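+ /*
+ * The offset is given in units of 4096 bytes; reject offsets that
+ * are not PAGE_SIZE-aligned, then convert to units of PAGE_SIZE.
+ */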
+ if (pgoff & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
+}
+
+SYSCALL_DEFINE4(fadvise64_64_wrapper, int, fd, int, advice, loff_t, offset,
+ loff_t, len)
+{
+ return sys_fadvise64_64(fd, offset, len, advice);
+}
+
+SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
+{
+ struct vm_area_struct *vma;
+ bool flushi = true, wbd = true;
+
+ vma = find_vma(current->mm, start);
+ if (!vma)
+ return -EFAULT;
+ switch (cache) {
+ case ICACHE:
+ wbd = false;
+ break;
+ case DCACHE:
+ flushi = false;
+ break;
+ case BCACHE:
+ break;
+ default:
+ return -EINVAL;
+ }
+ cpu_cache_wbinval_range_check(vma, start, end, flushi, wbd);
+
+ return 0;
+}
diff --git a/arch/nds32/kernel/syscall_table.c b/arch/nds32/kernel/syscall_table.c
new file mode 100644
index 000000000000..7879c061b87f
--- /dev/null
+++ b/arch/nds32/kernel/syscall_table.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
+#define sys_fadvise64_64 sys_fadvise64_64_wrapper
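+/*
+ * Every entry defaults to sys_ni_syscall; re-including <asm/unistd.h>
+ * with the __SYSCALL() definition above then overrides each implemented
+ * syscall with a designated initializer.
+ */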
+void *sys_call_table[__NR_syscalls] __aligned(8192) = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/nds32/kernel/time.c b/arch/nds32/kernel/time.c
new file mode 100644
index 000000000000..ac9d78ce3a81
--- /dev/null
+++ b/arch/nds32/kernel/time.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/clocksource.h>
+#include <linux/clk-provider.h>
+
+void __init time_init(void)
+{
+ of_clk_init(NULL);
+ timer_probe();
+}
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
new file mode 100644
index 000000000000..6e34eb9824a4
--- /dev/null
+++ b/arch/nds32/kernel/traps.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+
+#include <asm/proc-fns.h>
+#include <asm/unistd.h>
+
+#include <linux/ptrace.h>
+#include <nds32_intrinsic.h>
+
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
+{
+ unsigned long first;
+ mm_segment_t fs;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we dump the
+ * memory first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);
+
+ for (first = bottom & ~31; first < top; first += 32) {
+ unsigned long p;
+ char str[sizeof(" 12345678") * 8 + 1];
+
+ memset(str, ' ', sizeof(str));
+ str[sizeof(str) - 1] = '\0';
+
+ for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+ if (p >= bottom && p < top) {
+ unsigned long val;
+ if (__get_user(val, (unsigned long *)p) == 0)
+ sprintf(str + i * 9, " %08lx", val);
+ else
+ sprintf(str + i * 9, " ????????");
+ }
+ }
+ pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
+ }
+
+ set_fs(fs);
+}
+
+EXPORT_SYMBOL(dump_mem);
+
+static void dump_instr(struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ mm_segment_t fs;
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
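+ /* Instruction dumping is disabled for now; drop this early return to enable it. */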
+ return;
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ pr_emerg("Code: ");
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+ bad = __get_user(val, &((u32 *) addr)[i]);
+
+ if (!bad) {
+ p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+ } else {
+ p += sprintf(p, "bad PC value");
+ break;
+ }
+ }
+ pr_emerg("Code: %s\n", str);
+
+ set_fs(fs);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#include <linux/ftrace.h>
+static void
+get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
+{
+ if (*addr == (unsigned long)return_to_handler) {
+ int index = tsk->curr_ret_stack;
+
+ if (tsk->ret_stack && index >= *graph) {
+ index -= *graph;
+ *addr = tsk->ret_stack[index].ret;
+ (*graph)++;
+ }
+ }
+}
+#else
+static inline void
+get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
+{
+}
+#endif
+
+#define LOOP_TIMES (100)
+static void __dump(struct task_struct *tsk, unsigned long *base_reg)
+{
+ unsigned long ret_addr;
+ int cnt = LOOP_TIMES, graph = 0;
+ pr_emerg("Call Trace:\n");
+ if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
+ while (!kstack_end(base_reg)) {
+ ret_addr = *base_reg++;
+ if (__kernel_text_address(ret_addr)) {
+ get_real_ret_addr(&ret_addr, tsk, &graph);
+ print_ip_sym(ret_addr);
+ }
+ if (--cnt < 0)
+ break;
+ }
+ } else {
+ while (!kstack_end((void *)base_reg) &&
+ !((unsigned long)base_reg & 0x3) &&
+ ((unsigned long)base_reg >= TASK_SIZE)) {
+ unsigned long next_fp;
+#if !defined(NDS32_ABI_2)
+ ret_addr = base_reg[0];
+ next_fp = base_reg[1];
+#else
+ ret_addr = base_reg[-1];
+ next_fp = base_reg[FP_OFFSET];
+#endif
+ if (__kernel_text_address(ret_addr)) {
+ get_real_ret_addr(&ret_addr, tsk, &graph);
+ print_ip_sym(ret_addr);
+ }
+ if (--cnt < 0)
+ break;
+ base_reg = (unsigned long *)next_fp;
+ }
+ }
+ pr_emerg("\n");
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long *base_reg;
+
+ if (!tsk)
+ tsk = current;
+ if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
+ if (tsk != current)
+ base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
+ else
+ __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
+ } else {
+ if (tsk != current)
+ base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
+ else
+ __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
+ }
+ __dump(tsk, base_reg);
+ barrier();
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+ struct task_struct *tsk = current;
+ static int die_counter;
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
+ print_modules();
+ pr_emerg("CPU: %i\n", smp_processor_id());
+ show_regs(regs);
+ pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
+ tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+
+ if (!user_mode(regs) || in_interrupt()) {
+ dump_mem("Stack: ", regs->sp,
+ THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+ dump_instr(regs);
+ dump_stack();
+ }
+
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+EXPORT_SYMBOL(die);
+
+void die_if_kernel(const char *str, struct pt_regs *regs, int err)
+{
+ if (user_mode(regs))
+ return;
+
+ die(str, regs, err);
+}
+
+int bad_syscall(int n, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ if (current->personality != PER_LINUX) {
+ send_sig(SIGSEGV, current, 1);
+ return regs->uregs[0];
+ }
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLTRP;
+ info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
+
+ force_sig_info(SIGILL, &info, current);
+ die_if_kernel("Oops - bad syscall", regs, n);
+ return regs->uregs[0];
+}
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+ pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+ pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+ pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
+}
+
+extern char *exception_vector, *exception_vector_end;
+void __init trap_init(void)
+{
+ return;
+}
+
+void __init early_trap_init(void)
+{
+ unsigned long ivb = 0;
+ unsigned long base = PAGE_OFFSET;
+
+ memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
+ ((unsigned long)&exception_vector_end -
+ (unsigned long)&exception_vector));
+ ivb = __nds32__mfsr(NDS32_SR_IVB);
+ /* Check platform support. */
+ if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
+ panic("IVIC mode is not allowed on platforms with an interrupt controller\n");
+ __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
+ IVB_BASE, NDS32_SR_IVB);
+ __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);
+
+ /*
+ * 0x800 = 128 vectors * 16 bytes.
+ * Flushing one page is enough to cover them.
+ */
+ cpu_cache_wbinval_page(base, true);
+}
+
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code)
+{
+ struct siginfo info;
+
+ tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
+ tsk->thread.error_code = error_code;
+
+ memset(&info, 0, sizeof(info));
+ info.si_signo = SIGTRAP;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)instruction_pointer(regs);
+ force_sig_info(SIGTRAP, &info, tsk);
+}
+
+void do_debug_trap(unsigned long entry, unsigned long addr,
+ unsigned long type, struct pt_regs *regs)
+{
+ if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+
+ if (user_mode(regs)) {
+ /* trap_signal */
+ send_sigtrap(current, regs, 0, TRAP_BRKPT);
+ } else {
+ /* kernel_trap */
+ if (!fixup_exception(regs))
+ die("unexpected kernel_trap", regs, 0);
+ }
+}
+
+void unhandled_interruption(struct pt_regs *regs)
+{
+ siginfo_t si;
+ pr_emerg("unhandled_interruption\n");
+ show_regs(regs);
+ if (!user_mode(regs))
+ do_exit(SIGKILL);
+ si.si_signo = SIGKILL;
+ si.si_errno = 0;
+ force_sig_info(SIGKILL, &si, current);
+}
+
+void unhandled_exceptions(unsigned long entry, unsigned long addr,
+ unsigned long type, struct pt_regs *regs)
+{
+ siginfo_t si;
+ pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
+ addr, type);
+ show_regs(regs);
+ if (!user_mode(regs))
+ do_exit(SIGKILL);
+ si.si_signo = SIGKILL;
+ si.si_errno = 0;
+ si.si_addr = (void *)addr;
+ force_sig_info(SIGKILL, &si, current);
+}
+
+extern int do_page_fault(unsigned long entry, unsigned long addr,
+ unsigned int error_code, struct pt_regs *regs);
+
+/*
+ * 2:DEF dispatch for TLB MISC exception handler
+*/
+
+void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
+ unsigned long type, struct pt_regs *regs)
+{
+ type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
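+ /* ETYPE values below 5 are TLB translation/permission faults. */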
+ if ((type & ITYPE_mskETYPE) < 5) {
+ /* Permission exceptions */
+ do_page_fault(entry, addr, type, regs);
+ } else
+ unhandled_exceptions(entry, addr, type, regs);
+}
+
+void do_revinsn(struct pt_regs *regs)
+{
+ siginfo_t si;
+ pr_emerg("Reserved Instruction\n");
+ show_regs(regs);
+ if (!user_mode(regs))
+ do_exit(SIGILL);
+ si.si_signo = SIGILL;
+ si.si_errno = 0;
+ force_sig_info(SIGILL, &si, current);
+}
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+extern int unalign_access_mode;
+extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
+#endif
+void do_dispatch_general(unsigned long entry, unsigned long addr,
+ unsigned long itype, struct pt_regs *regs,
+ unsigned long oipc)
+{
+ unsigned int swid = itype >> ITYPE_offSWID;
+ unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
+ if (type == ETYPE_ALIGNMENT_CHECK) {
+#ifdef CONFIG_ALIGNMENT_TRAP
+ /* Alignment check */
+ if (user_mode(regs) && unalign_access_mode) {
+ int ret;
+ ret = do_unaligned_access(addr, regs);
+
+ if (ret == 0)
+ return;
+
+ if (ret == -EFAULT)
+ pr_emerg
+ ("Unhandled unaligned access exception\n");
+ }
+#endif
+ do_page_fault(entry, addr, type, regs);
+ } else if (type == ETYPE_RESERVED_INSTRUCTION) {
+ /* Reserved instruction */
+ do_revinsn(regs);
+ } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
+ /* trap, used on v3 EDM target debugging workaround */
+ /*
+ * DIPC(OIPC) is passed as parameter before
+ * interrupt is enabled, so the DIPC will not be corrupted
+ * even though interrupts are coming in
+ */
+ /*
+ * 1. update ipc
+ * 2. update pt_regs ipc with oipc
+ * 3. update pt_regs ipsw (clear DEX)
+ */
+ __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
+ regs->ipc = oipc;
+ if (regs->pipsw & PSW_mskDEX) {
+ pr_emerg("Nested debug exception may have occurred\n");
+ pr_emerg("ipc:%08x pipc:%08x\n",
+ (unsigned int)regs->ipc,
+ (unsigned int)regs->pipc);
+ }
+ do_debug_trap(entry, addr, itype, regs);
+ regs->ipsw &= ~PSW_mskDEX;
+ } else
+ unhandled_exceptions(entry, addr, type, regs);
+}
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
new file mode 100644
index 000000000000..f1198d7a5654
--- /dev/null
+++ b/arch/nds32/kernel/vdso.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/cache.h>
+#include <linux/clocksource.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <asm/cacheflush.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vdso_timer_info.h>
+#include <asm/cache_info.h>
+extern struct cache_info L1_cache_info[2];
+extern char vdso_start, vdso_end;
+static unsigned long vdso_pages __ro_after_init;
+static unsigned long timer_mapping_base;
+
+struct timer_info_t timer_info = {
+ .cycle_count_down = true,
+ .mapping_base = EMPTY_TIMER_MAPPING,
+ .cycle_count_reg_offset = EMPTY_REG_OFFSET
+};
+/*
+ * The vDSO data page.
+ */
+static struct page *no_pages[] = { NULL };
+
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
+ {
+ .name = "[vvar]",
+ .pages = no_pages,
+ },
+ {
+ .name = "[vdso]",
+ },
+};
+
+static void get_timer_node_info(void)
+{
+ timer_mapping_base = timer_info.mapping_base;
+ vdso_data->cycle_count_offset =
+ timer_info.cycle_count_reg_offset;
+ vdso_data->cycle_count_down =
+ timer_info.cycle_count_down;
+}
+
+static int __init vdso_init(void)
+{
+ int i;
+ struct page **vdso_pagelist;
+
+ if (memcmp(&vdso_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
+ /* Create a timer I/O mapping to read the clock cycle counter */
+ get_timer_node_info();
+
+ vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+ vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+
+ /* Allocate the vDSO pagelist */
+ vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
+ if (vdso_pagelist == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < vdso_pages; i++)
+ vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ vdso_spec[1].pages = &vdso_pagelist[0];
+
+ return 0;
+}
+
+arch_initcall(vdso_init);
+
+inline unsigned long vdso_random_addr(unsigned long vdso_mapping_len)
+{
+ unsigned long start = current->mm->mmap_base, end, offset, addr;
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + vdso_mapping_len + PMD_SIZE - 1) & PMD_MASK;
+ if (end >= TASK_SIZE)
+ end = TASK_SIZE;
+ end -= vdso_mapping_len;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
+ return addr;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+ struct vm_area_struct *vma;
+ unsigned long addr = 0;
+ pgprot_t prot;
+ int ret, vvar_page_num = 2;
+
+ vdso_text_len = vdso_pages << PAGE_SHIFT;
+
+ if (timer_mapping_base == EMPTY_VALUE)
+ vvar_page_num = 1;
+ /* Be sure to map the data page */
+ vdso_mapping_len = vdso_text_len + vvar_page_num * PAGE_SIZE;
+#ifdef CONFIG_CPU_CACHE_ALIASING
+ vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
+#endif
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ addr = vdso_random_addr(vdso_mapping_len);
+ vdso_base = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto up_fail;
+ }
+
+#ifdef CONFIG_CPU_CACHE_ALIASING
+ {
+ unsigned int aliasing_mask =
+ L1_cache_info[DCACHE].aliasing_mask;
+ unsigned int page_colour_ofs;
+ page_colour_ofs = ((unsigned int)vdso_data & aliasing_mask) -
+ (vdso_base & aliasing_mask);
+ vdso_base += page_colour_ofs & aliasing_mask;
+ }
+#endif
+
+ vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE,
+ VM_READ | VM_MAYREAD, &vdso_spec[0]);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto up_fail;
+ }
+
+ /* Map the vDSO data page to user space */
+ ret = io_remap_pfn_range(vma, vdso_base,
+ virt_to_phys(vdso_data) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
+ if (ret)
+ goto up_fail;
+
+ /* Map the timer page to user space */
+ vdso_base += PAGE_SIZE;
+ prot = __pgprot(_PAGE_V | _PAGE_M_UR_KR | _PAGE_D | _PAGE_C_DEV);
+ ret = io_remap_pfn_range(vma, vdso_base, timer_mapping_base >> PAGE_SHIFT,
+ PAGE_SIZE, prot);
+ if (ret)
+ goto up_fail;
+
+ /* Map the vDSO text to user space */
+ vdso_base += PAGE_SIZE;
+ mm->context.vdso = (void *)vdso_base;
+ vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &vdso_spec[1]);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto up_fail;
+ }
+
+ up_write(&mm->mmap_sem);
+ return 0;
+
+up_fail:
+ mm->context.vdso = NULL;
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+static void vdso_write_begin(struct vdso_data *vdata)
+{
+ ++vdso_data->seq_count;
+ smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
+}
+
+static void vdso_write_end(struct vdso_data *vdata)
+{
+ smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
+ ++vdso_data->seq_count;
+}
+
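+/*
+ * Called by the timekeeping core after each clock update; snapshot the
+ * fields the vDSO fast paths read, bracketed by the sequence counter.
+ */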
+void update_vsyscall(struct timekeeper *tk)
+{
+ vdso_write_begin(vdso_data);
+ vdso_data->cs_mask = tk->tkr_mono.mask;
+ vdso_data->cs_mult = tk->tkr_mono.mult;
+ vdso_data->cs_shift = tk->tkr_mono.shift;
+ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ vdso_data->xtime_clock_sec = tk->xtime_sec;
+ vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
+ vdso_write_end(vdso_data);
+}
+
+void update_vsyscall_tz(void)
+{
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+}
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
new file mode 100644
index 000000000000..e6c50a701313
--- /dev/null
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -0,0 +1,82 @@
+#
+# Building a vDSO image for nds32.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+# Heavily based on the vDSO Makefiles for other archs.
+#
+
+obj-vdso := note.o datapage.o sigreturn.o gettimeofday.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ccflags-y += -fPIC -Wl,-shared -g
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+
+obj-y += vdso.o
+extra-y += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+ $(call if_changed,vdsold)
+
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+define cmd_vdsosym
+ $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+endef
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+
+
+# Assembly rules for the .S files
+
+sigreturn.o : sigreturn.S
+ $(call if_changed_dep,vdsoas)
+
+note.o : note.S
+ $(call if_changed_dep,vdsoas)
+
+datapage.o : datapage.S
+ $(call if_changed_dep,vdsoas)
+
+gettimeofday.o : gettimeofday.c FORCE
+ $(call if_changed_dep,vdsocc)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdsocc = VDSOC $@
+ cmd_vdsocc = $(CC) $(c_flags) -c -o $@ $<
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/nds32/kernel/vdso/datapage.S b/arch/nds32/kernel/vdso/datapage.S
new file mode 100644
index 000000000000..4a62c3cab1c8
--- /dev/null
+++ b/arch/nds32/kernel/vdso/datapage.S
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
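+! Both helpers return the runtime address of a page mapped at a fixed
+! offset below the vDSO text: the hi20/lo12 pair encodes the link-time
+! offset of this code plus the page displacement, so subtracting it
+! from the runtime $pc yields the page base (timer page: one page
+! below the text; data page: two pages below).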
+ENTRY(__get_timerpage)
+ sethi $r0, hi20(. + PAGE_SIZE + 8)
+ ori $r0, $r0, lo12(. + PAGE_SIZE + 4)
+ mfusr $r1, $pc
+ sub $r0, $r1, $r0
+ ret
+ENDPROC(__get_timerpage)
+
+ENTRY(__get_datapage)
+ sethi $r0, hi20(. + 2*PAGE_SIZE + 8)
+ ori $r0, $r0, lo12(. + 2*PAGE_SIZE + 4)
+ mfusr $r1, $pc
+ sub $r0, $r1, $r0
+ ret
+ENDPROC(__get_datapage)
diff --git a/arch/nds32/kernel/vdso/gen_vdso_offsets.sh b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..01924ff071ad
--- /dev/null
+++ b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
new file mode 100644
index 000000000000..038721af40e3
--- /dev/null
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/compiler.h>
+#include <linux/hrtimer.h>
+#include <linux/time.h>
+#include <asm/io.h>
+#include <asm/barrier.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vdso_timer_info.h>
+#include <asm/asm-offsets.h>
+
+#define X(x) #x
+#define Y(x) X(x)
+
+extern struct vdso_data *__get_datapage(void);
+extern struct vdso_data *__get_timerpage(void);
+
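+/*
+ * Reader side of the vDSO data-page sequence counter: seq_count is odd
+ * while the kernel is updating the page, so spin until it is even, then
+ * pair with vdso_read_retry() to detect a concurrent update.
+ */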
+static notrace unsigned int __vdso_read_begin(const struct vdso_data *vdata)
+{
+ u32 seq;
+repeat:
+ seq = READ_ONCE(vdata->seq_count);
+ if (seq & 1) {
+ cpu_relax();
+ goto repeat;
+ }
+ return seq;
+}
+
+static notrace unsigned int vdso_read_begin(const struct vdso_data *vdata)
+{
+ unsigned int seq;
+
+ seq = __vdso_read_begin(vdata);
+
+ smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
+ return seq;
+}
+
+static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
+{
+ smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */
+ return vdata->seq_count != start;
+}
+
+static notrace long clock_gettime_fallback(clockid_t _clkid,
+ struct timespec *_ts)
+{
+ register struct timespec *ts asm("$r1") = _ts;
+ register clockid_t clkid asm("$r0") = _clkid;
+ register long ret asm("$r0");
+
+ asm volatile ("movi $r15, %3\n"
+ "syscall 0x0\n"
+ :"=r" (ret)
+ :"r"(clkid), "r"(ts), "i"(__NR_clock_gettime)
+ :"$r15", "memory");
+
+ return ret;
+}
+
+static notrace int do_realtime_coarse(struct timespec *ts,
+ struct vdso_data *vdata)
+{
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ ts->tv_sec = vdata->xtime_coarse_sec;
+ ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+ return 0;
+}
+
+static notrace int do_monotonic_coarse(struct timespec *ts,
+ struct vdso_data *vdata)
+{
+ struct timespec tomono;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ ts->tv_sec = vdata->xtime_coarse_sec;
+ ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+ tomono.tv_sec = vdata->wtm_clock_sec;
+ tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+
+ ts->tv_sec += tomono.tv_sec;
+ timespec_add_ns(ts, tomono.tv_nsec);
+ return 0;
+}
+
+static notrace inline u64 vgetsns(struct vdso_data *vdso)
+{
+ u32 cycle_now;
+ u32 cycle_delta;
+ u32 *timer_cycle_base;
+
+ timer_cycle_base =
+ (u32 *) ((char *)__get_timerpage() + vdso->cycle_count_offset);
+ cycle_now = readl_relaxed(timer_cycle_base);
+ if (vdso->cycle_count_down)
+ cycle_now = ~cycle_now; /* invert a down-counting timer */
+ cycle_delta = cycle_now - (u32) vdso->cs_cycle_last;
+ return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult;
+}
+
+static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
+{
+ unsigned count;
+ u64 ns;
+ do {
+ count = vdso_read_begin(vdata);
+ ts->tv_sec = vdata->xtime_clock_sec;
+ ns = vdata->xtime_clock_nsec;
+ ns += vgetsns(vdata);
+ ns >>= vdata->cs_shift;
+ } while (vdso_read_retry(vdata, count));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
+{
+ struct timespec tomono;
+ u64 nsecs;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vdata);
+
+ ts->tv_sec = vdata->xtime_clock_sec;
+ nsecs = vdata->xtime_clock_nsec;
+ nsecs += vgetsns(vdata);
+ nsecs >>= vdata->cs_shift;
+
+ tomono.tv_sec = vdata->wtm_clock_sec;
+ tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+ } while (vdso_read_retry(vdata, seq));
+
+ ts->tv_sec += tomono.tv_sec;
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsecs + tomono.tv_nsec);
+ return 0;
+}
+
+notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
+{
+ struct vdso_data *vdata;
+ int ret = -1;
+
+ vdata = __get_datapage();
+ if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
+ return clock_gettime_fallback(clkid, ts);
+
+ switch (clkid) {
+ case CLOCK_REALTIME_COARSE:
+ ret = do_realtime_coarse(ts, vdata);
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ ret = do_monotonic_coarse(ts, vdata);
+ break;
+ case CLOCK_REALTIME:
+ ret = do_realtime(ts, vdata);
+ break;
+ case CLOCK_MONOTONIC:
+ ret = do_monotonic(ts, vdata);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ ret = clock_gettime_fallback(clkid, ts);
+
+ return ret;
+}
+
+static notrace int clock_getres_fallback(clockid_t _clk_id,
+ struct timespec *_res)
+{
+ register clockid_t clk_id asm("$r0") = _clk_id;
+ register struct timespec *res asm("$r1") = _res;
+ register int ret asm("$r0");
+
+ asm volatile ("movi $r15, %3\n"
+ "syscall 0x0\n"
+ :"=r" (ret)
+ :"r"(clk_id), "r"(res), "i"(__NR_clock_getres)
+ :"$r15", "memory");
+
+ return ret;
+}
+
+notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
+{
+ if (res == NULL)
+ return 0;
+ switch (clk_id) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_MONOTONIC_RAW:
+ res->tv_sec = 0;
+ res->tv_nsec = CLOCK_REALTIME_RES;
+ break;
+ case CLOCK_REALTIME_COARSE:
+ case CLOCK_MONOTONIC_COARSE:
+ res->tv_sec = 0;
+ res->tv_nsec = CLOCK_COARSE_RES;
+ break;
+ default:
+ return clock_getres_fallback(clk_id, res);
+ }
+ return 0;
+}
+
+static notrace inline int gettimeofday_fallback(struct timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timeval *tv asm("$r0") = _tv;
+ register struct timezone *tz asm("$r1") = _tz;
+ register int ret asm("$r0");
+
+ asm volatile ("movi $r15, %3\n"
+ "syscall 0x0\n"
+ :"=r" (ret)
+ :"r"(tv), "r"(tz), "i"(__NR_gettimeofday)
+ :"$r15", "memory");
+
+ return ret;
+}
+
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ struct timespec ts;
+ struct vdso_data *vdata;
+ int ret;
+
+ vdata = __get_datapage();
+
+ if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
+ return gettimeofday_fallback(tv, tz);
+
+ ret = do_realtime(&ts, vdata);
+
+ if (tv) {
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / 1000;
+ }
+ if (tz) {
+ tz->tz_minuteswest = vdata->tz_minuteswest;
+ tz->tz_dsttime = vdata->tz_dsttime;
+ }
+
+ return ret;
+}
diff --git a/arch/nds32/kernel/vdso/note.S b/arch/nds32/kernel/vdso/note.S
new file mode 100644
index 000000000000..0aeaa19b05f0
--- /dev/null
+++ b/arch/nds32/kernel/vdso/note.S
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/nds32/kernel/vdso/sigreturn.S b/arch/nds32/kernel/vdso/sigreturn.S
new file mode 100644
index 000000000000..67e4d1d1612a
--- /dev/null
+++ b/arch/nds32/kernel/vdso/sigreturn.S
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+
+ENTRY(__kernel_rt_sigreturn)
+ .cfi_startproc
+ movi $r15, __NR_rt_sigreturn
+ /*
+ * The syscall SWID must be __NR_rt_sigreturn so that it matches
+ * the unwinding scheme used by gcc.
+ */
+ syscall __NR_rt_sigreturn
+ .cfi_endproc
+ENDPROC(__kernel_rt_sigreturn)
diff --git a/arch/nds32/kernel/vdso/vdso.S b/arch/nds32/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..16737c11e55b
--- /dev/null
+++ b/arch/nds32/kernel/vdso/vdso.S
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+ .globl vdso_start, vdso_end
+ .section .rodata
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/nds32/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/nds32/kernel/vdso/vdso.lds.S b/arch/nds32/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..1f2b16004594
--- /dev/null
+++ b/arch/nds32/kernel/vdso/vdso.lds.S
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2005-2017 Andes Technology Corporation
+ */
+
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_ARCH(nds32)
+
+SECTIONS
+{
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+
+ .text : { *(.text*) } :text
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_4 {
+ global:
+ __kernel_rt_sigreturn;
+ __vdso_gettimeofday;
+ __vdso_clock_getres;
+ __vdso_clock_gettime;
+ local: *;
+ };
+}
+
+/*
+ * Make the rt_sigreturn code visible to the kernel.
+ */
+VDSO_rt_sigtramp = __kernel_rt_sigreturn;
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..288313b886ef
--- /dev/null
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+
+#define LOAD_OFFSET (PAGE_OFFSET - PHYS_OFFSET)
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(nds32)
+ENTRY(_stext_lma)
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ _stext_lma = TEXTADDR - LOAD_OFFSET;
+ . = TEXTADDR;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ _stext = .;
+ /* Real text segment */
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ _text = .; /* Text and read-only data */
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ }
+
+ _etext = .; /* End of text and rodata section */
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ EXCEPTION_TABLE(16)
+ NOTES
+ BSS_SECTION(4, 4, 4)
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
diff --git a/arch/nds32/lib/Makefile b/arch/nds32/lib/Makefile
new file mode 100644
index 000000000000..0f9840103f03
--- /dev/null
+++ b/arch/nds32/lib/Makefile
@@ -0,0 +1,3 @@
+lib-y := copy_page.o memcpy.o memmove.o \
+ memset.o memzero.o \
+ copy_from_user.o copy_to_user.o clear_user.o
diff --git a/arch/nds32/lib/clear_user.S b/arch/nds32/lib/clear_user.S
new file mode 100644
index 000000000000..805dfcd25bf8
--- /dev/null
+++ b/arch/nds32/lib/clear_user.S
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+ .text
+ .align 5
+ENTRY(__arch_clear_user)
+ add $r5, $r0, $r1
+ beqz $r1, clear_exit
+ xor $p1, $p1, $p1 ! Use $p1=0 to clear mem
+ srli $p0, $r1, #2 ! $p0 = number of words to clear
+ andi $r1, $r1, #3 ! Bytes less than a word to clear
+ beqz $p0, byte_clear ! Only less than a word to clear
+word_clear:
+USER( smw.bim,$p1, [$r0], $p1) ! Clear the word
+ addi $p0, $p0, #-1 ! Decrease word count
+ bnez $p0, word_clear ! Continue looping to clear all words
+ beqz $r1, clear_exit ! No bytes left to clear
+byte_clear:
+USER( sbi.bi, $p1, [$r0], #1) ! Clear the byte
+ addi $r1, $r1, #-1 ! Decrease byte count
+ bnez $r1, byte_clear ! Continue looping to clear the remaining bytes
+clear_exit:
+ move $r0, $r1 ! Set return value
+ ret
+
+ .section .fixup,"ax"
+ .align 0
+9001:
+ sub $r0, $r5, $r0 ! Bytes left to clear
+ ret
+ .previous
+ENDPROC(__arch_clear_user)
diff --git a/arch/nds32/lib/copy_from_user.S b/arch/nds32/lib/copy_from_user.S
new file mode 100644
index 000000000000..ad1857b20067
--- /dev/null
+++ b/arch/nds32/lib/copy_from_user.S
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+.macro lbi1 dst, addr, adj
+USER( lbi.bi, \dst, [\addr], \adj)
+.endm
+
+.macro sbi1 src, addr, adj
+sbi.bi \src, [\addr], \adj
+.endm
+
+.macro lmw1 start_reg, addr, end_reg
+USER( lmw.bim, \start_reg, [\addr], \end_reg)
+.endm
+
+.macro smw1 start_reg, addr, end_reg
+smw.bim \start_reg, [\addr], \end_reg
+.endm
+
+
+/* Prototype: int __arch_copy_from_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.text
+ENTRY(__arch_copy_from_user)
+ add $r5, $r0, $r2
+#include "copy_template.S"
+ move $r0, $r2
+ ret
+.section .fixup,"ax"
+.align 2
+9001:
+ sub $r0, $r5, $r0
+ ret
+.previous
+ENDPROC(__arch_copy_from_user)
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S
new file mode 100644
index 000000000000..4a2ff85f17ee
--- /dev/null
+++ b/arch/nds32/lib/copy_page.S
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ .text
+ENTRY(copy_page)
+ pushm $r2, $r10
+ movi $r2, PAGE_SIZE >> 5
+.Lcopy_loop:
+ lmw.bim $r3, [$r1], $r10
+ smw.bim $r3, [$r0], $r10
+ subi45 $r2, #1
+ bnez38 $r2, .Lcopy_loop
+ popm $r2, $r10
+ ret
+ENDPROC(copy_page)
+
+ENTRY(clear_page)
+ pushm $r1, $r9
+ movi $r1, PAGE_SIZE >> 5
+ movi55 $r2, #0
+ movi55 $r3, #0
+ movi55 $r4, #0
+ movi55 $r5, #0
+ movi55 $r6, #0
+ movi55 $r7, #0
+ movi55 $r8, #0
+ movi55 $r9, #0
+.Lclear_loop:
+ smw.bim $r2, [$r0], $r9
+ subi45 $r1, #1
+ bnez38 $r1, .Lclear_loop
+ popm $r1, $r9
+ ret
+ENDPROC(clear_page)
diff --git a/arch/nds32/lib/copy_template.S b/arch/nds32/lib/copy_template.S
new file mode 100644
index 000000000000..3a9a2de468c2
--- /dev/null
+++ b/arch/nds32/lib/copy_template.S
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+
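+! Copy loop shared by memcpy and the user-space copy helpers; the
+! including file defines the lbi1/sbi1/lmw1/smw1 access macros.
+! Strategy: copy leading bytes until the destination is word-aligned,
+! then words until it is 32-byte aligned, then whole 32-byte
+! (cache-line) chunks, then the remaining words and bytes.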
+ beq $r1, $r0, quit_memcpy
+ beqz $r2, quit_memcpy
+ srli $r3, $r2, #5 ! check if len < cache-line size 32
+ beqz $r3, word_copy_entry
+ andi $r4, $r0, #0x3 ! check byte-align
+ beqz $r4, unalign_word_copy_entry
+
+ addi $r4, $r4,#-4
+ abs $r4, $r4 ! how many unaligned leading bytes to copy
+ sub $r2, $r2, $r4 ! update $R2
+
+unalign_byte_copy:
+ lbi1 $r3, $r1, #1
+ addi $r4, $r4, #-1
+ sbi1 $r3, $r0, #1
+ bnez $r4, unalign_byte_copy
+ beqz $r2, quit_memcpy
+
+unalign_word_copy_entry:
+ andi $r3, $r0, 0x1f ! dst's offset within a 32-byte cache line
+ beqz $r3, cache_copy
+
+ addi $r3, $r3, #-32
+ abs $r3, $r3
+ sub $r2, $r2, $r3 ! update $R2
+
+unalign_word_copy:
+ lmw1 $r4, $r1, $r4
+ addi $r3, $r3, #-4
+ smw1 $r4, $r0, $r4
+ bnez $r3, unalign_word_copy
+ beqz $r2, quit_memcpy
+
+ addi $r3, $r2, #-32 ! if $r2 < cache-line size, go to word_copy
+ bltz $r3, word_copy_entry
+cache_copy:
+ srli $r3, $r2, #5
+ beqz $r3, word_copy_entry
+3:
+ lmw1 $r17, $r1, $r24
+ addi $r3, $r3, #-1
+ smw1 $r17, $r0, $r24
+ bnez $r3, 3b
+
+word_copy_entry:
+ andi $r2, $r2, #31
+
+ beqz $r2, quit_memcpy
+5:
+ srli $r3, $r2, #2
+ beqz $r3, byte_copy
+word_copy:
+ lmw1 $r4, $r1, $r4
+ addi $r3, $r3, #-1
+ smw1 $r4, $r0, $r4
+ bnez $r3, word_copy
+ andi $r2, $r2, #3
+ beqz $r2, quit_memcpy
+byte_copy:
+ lbi1 $r3, $r1, #1
+ addi $r2, $r2, #-1
+
+ sbi1 $r3, $r0, #1
+ bnez $r2, byte_copy
+quit_memcpy:
diff --git a/arch/nds32/lib/copy_to_user.S b/arch/nds32/lib/copy_to_user.S
new file mode 100644
index 000000000000..3230044dcfb8
--- /dev/null
+++ b/arch/nds32/lib/copy_to_user.S
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+.macro lbi1 dst, addr, adj
+lbi.bi \dst, [\addr], \adj
+.endm
+
+.macro sbi1 src, addr, adj
+USER( sbi.bi, \src, [\addr], \adj)
+.endm
+
+.macro lmw1 start_reg, addr, end_reg
+lmw.bim \start_reg, [\addr], \end_reg
+.endm
+
+.macro smw1 start_reg, addr, end_reg
+USER( smw.bim, \start_reg, [\addr], \end_reg)
+.endm
+
+
+/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.text
+ENTRY(__arch_copy_to_user)
+ add $r5, $r0, $r2
+#include "copy_template.S"
+ move $r0, $r2
+ ret
+.section .fixup,"ax"
+.align 2
+9001:
+ sub $r0, $r5, $r0
+ ret
+.previous
+ENDPROC(__arch_copy_to_user)
diff --git a/arch/nds32/lib/memcpy.S b/arch/nds32/lib/memcpy.S
new file mode 100644
index 000000000000..a2345ea721e4
--- /dev/null
+++ b/arch/nds32/lib/memcpy.S
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+
+
+.macro lbi1 dst, addr, adj
+lbi.bi \dst, [\addr], \adj
+.endm
+
+.macro sbi1 src, addr, adj
+sbi.bi \src, [\addr], \adj
+.endm
+
+.macro lmw1 start_reg, addr, end_reg
+lmw.bim \start_reg, [\addr], \end_reg
+.endm
+
+.macro smw1 start_reg, addr, end_reg
+smw.bim \start_reg, [\addr], \end_reg
+.endm
+
+.text
+ENTRY(memcpy)
+ move $r5, $r0
+#include "copy_template.S"
+ move $r0, $r5
+ ret
+
+ENDPROC(memcpy)
diff --git a/arch/nds32/lib/memmove.S b/arch/nds32/lib/memmove.S
new file mode 100644
index 000000000000..c823aada2271
--- /dev/null
+++ b/arch/nds32/lib/memmove.S
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+
+/*
+ void *memmove(void *dst, const void *src, int n);
+
+ dst: $r0
+ src: $r1
+ n : $r2
+ ret: $r0 - pointer to the memory area dst.
+*/
+ .text
+
+ENTRY(memmove)
+ move $r5, $r0 ! Set return value = dst
+ beq $r0, $r1, exit_memcpy ! Exit when dst = src
+ beqz $r2, exit_memcpy ! Exit when n = 0
+ pushm $t0, $t1 ! Save reg
+ srli $p1, $r2, #2 ! $p1 is how many words to copy
+
+ ! Avoid data loss when the regions overlap:
+ ! copy backwards when src < dst
+ slt $p0, $r0, $r1 ! check if $r0 < $r1
+ beqz $p0, do_reverse ! branch if dst >= src
+
+ ! No reverse, dst < src
+ andi $r2, $r2, #3 ! How many bytes are less than a word
+ li $t0, #1 ! Determining copy direction in byte_cpy
+ beqz $p1, byte_cpy ! When n is less than a word
+
+word_cpy:
+ lmw.bim $p0, [$r1], $p0 ! Read a word from src
+ addi $p1, $p1, #-1 ! How many words left to copy
+ smw.bim $p0, [$r0], $p0 ! Copy the word to dst
+ bnez $p1, word_cpy ! If remained words > 0
+ beqz $r2, end_memcpy ! No left bytes to copy
+ b byte_cpy
+
+do_reverse:
+ add $r0, $r0, $r2 ! Start with the end of $r0
+ add $r1, $r1, $r2 ! Start with the end of $r1
+ andi $r2, $r2, #3 ! How many bytes are less than a word
+ li $t0, #-1 ! Determining copy direction in byte_cpy
+ beqz $p1, reverse_byte_cpy ! When n is less than a word
+
+reverse_word_cpy:
+ lmw.adm $p0, [$r1], $p0 ! Read a word from src
+ addi $p1, $p1, #-1 ! How many words left to copy
+ smw.adm $p0, [$r0], $p0 ! Copy the word to dst
+ bnez $p1, reverse_word_cpy ! If remained words > 0
+ beqz $r2, end_memcpy ! No left bytes to copy
+
+reverse_byte_cpy:
+ addi $r0, $r0, #-1
+ addi $r1, $r1, #-1
+byte_cpy: ! Less than 4 bytes to copy now
+ lb.bi $p0, [$r1], $t0 ! Read a byte from src
+ addi $r2, $r2, #-1 ! How many bytes left to copy
+ sb.bi $p0, [$r0], $t0 ! Copy the byte to dst
+ bnez $r2, byte_cpy ! If remained bytes > 0
+
+end_memcpy:
+ popm $t0, $t1
+exit_memcpy:
+ move $r0, $r5
+ ret
+
+ENDPROC(memmove)
diff --git a/arch/nds32/lib/memset.S b/arch/nds32/lib/memset.S
new file mode 100644
index 000000000000..193cb6ce21a9
--- /dev/null
+++ b/arch/nds32/lib/memset.S
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(memset)
+ move $r5, $r0 ! Return value
+ beqz $r2, end_memset ! Exit when len = 0
+ srli $p1, $r2, 2 ! $p1 is how many words to set
+ andi $r2, $r2, 3 ! How many bytes are less than a word
+ beqz $p1, byte_set ! When n is less than a word
+
+ ! set $r1 from ??????ab to abababab
+ andi $r1, $r1, #0x00ff ! $r1 = 000000ab
+ slli $p0, $r1, #8 ! $p0 = 0000ab00
+ or $r1, $r1, $p0 ! $r1 = 0000abab
+ slli $p0, $r1, #16 ! $p0 = abab0000
+ or $r1, $r1, $p0 ! $r1 = abababab
+word_set:
+ addi $p1, $p1, #-1 ! How many words left to set
+ smw.bim $r1, [$r0], $r1 ! Store the word to dst
+ bnez $p1, word_set ! Still words to set, continue looping
+ beqz $r2, end_memset ! No left byte to set
+byte_set: ! Less than 4 bytes left to set
+ addi $r2, $r2, #-1 ! Decrease len by 1
+ sbi.bi $r1, [$r0], #1 ! Set data of the next byte to $r1
+ bnez $r2, byte_set ! Still bytes left to set
+end_memset:
+ move $r0, $r5
+ ret
+
+ENDPROC(memset)
diff --git a/arch/nds32/lib/memzero.S b/arch/nds32/lib/memzero.S
new file mode 100644
index 000000000000..f055972c9343
--- /dev/null
+++ b/arch/nds32/lib/memzero.S
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(memzero)
+ beqz $r1, 1f
+ push $lp
+ move $r2, $r1
+ move $r1, #0
+ push $r0
+ bal memset
+ pop $r0
+ pop $lp
+1:
+ ret
+ENDPROC(memzero)
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
new file mode 100644
index 000000000000..6b6855852223
--- /dev/null
+++ b/arch/nds32/mm/Makefile
@@ -0,0 +1,7 @@
+obj-y := extable.o tlb.o \
+ fault.o init.o ioremap.o mmap.o \
+ mm-nds32.o cacheflush.o proc.o
+
+obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
+CFLAGS_proc.o += -fomit-frame-pointer
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c
new file mode 100644
index 000000000000..b96a01b10ca7
--- /dev/null
+++ b/arch/nds32/mm/alignment.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/sysctl.h>
+#include <asm/unaligned.h>
+
+#define DEBUG(enable, tagged, ...) \
+ do { \
+ if (enable) { \
+ if (tagged) \
+ pr_warn("[ %30s() ] ", __func__); \
+ pr_warn(__VA_ARGS__); \
+ } \
+ } while (0)
+
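+/*
+ * Field extractors for the instruction encodings emulated below:
+ * RT/RA/RB are 5-bit register indices, SV the 2-bit shift amount for
+ * register offsets, IMM the 14-bit immediate of the 32-bit forms; the
+ * RT3/RA3/IMM3U and RT4/RA5 variants cover the 16-bit forms.
+ */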
+#define RT(inst) (((inst) >> 20) & 0x1FUL)
+#define RA(inst) (((inst) >> 15) & 0x1FUL)
+#define RB(inst) (((inst) >> 10) & 0x1FUL)
+#define SV(inst) (((inst) >> 8) & 0x3UL)
+#define IMM(inst) (((inst) >> 0) & 0x3FFFUL)
+
+#define RA3(inst) (((inst) >> 3) & 0x7UL)
+#define RT3(inst) (((inst) >> 6) & 0x7UL)
+#define IMM3U(inst) (((inst) >> 0) & 0x7UL)
+
+#define RA5(inst) (((inst) >> 0) & 0x1FUL)
+#define RT4(inst) (((inst) >> 5) & 0xFUL)
+
+#define __get8_data(val,addr,err) \
+ __asm__( \
+ "1: lbi.bi %1, [%2], #1\n" \
+ "2:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: movi %0, #1\n" \
+ " j 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_data(addr, val_ptr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_data(v,a,err); \
+ *val_ptr = v << 0; \
+ __get8_data(v,a,err); \
+ *val_ptr |= v << 8; \
+ if (err) \
+ goto fault; \
+ *val_ptr = le16_to_cpu(*val_ptr); \
+ } while (0)
+
+#define get32_data(addr, val_ptr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_data(v,a,err); \
+ *val_ptr = v << 0; \
+ __get8_data(v,a,err); \
+ *val_ptr |= v << 8; \
+ __get8_data(v,a,err); \
+ *val_ptr |= v << 16; \
+ __get8_data(v,a,err); \
+ *val_ptr |= v << 24; \
+ if (err) \
+ goto fault; \
+ *val_ptr = le32_to_cpu(*val_ptr); \
+ } while (0)
+
+#define get_data(addr, val_ptr, len) \
+ do { \
+ if (len == 2) \
+ get16_data(addr, val_ptr); \
+ else \
+ get32_data(addr, val_ptr); \
+ } while (0)
+
+#define set16_data(addr, val) \
+ do { \
+ unsigned int err = 0, *ptr = addr; \
+ val = le32_to_cpu(val); \
+ __asm__( \
+ "1: sbi.bi %2, [%1], #1\n" \
+ " srli %2, %2, #8\n" \
+ "2: sbi %2, [%1]\n" \
+ "3:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: movi %0, #1\n" \
+ " j 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection\n" \
+ : "=r" (err), "+r" (ptr), "+r" (val) \
+ : "0" (err) \
+ ); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define set32_data(addr, val) \
+ do { \
+ unsigned int err = 0, *ptr = addr; \
+ val = le32_to_cpu(val); \
+ __asm__( \
+ "1: sbi.bi %2, [%1], #1\n" \
+ " srli %2, %2, #8\n" \
+ "2: sbi.bi %2, [%1], #1\n" \
+ " srli %2, %2, #8\n" \
+ "3: sbi.bi %2, [%1], #1\n" \
+ " srli %2, %2, #8\n" \
+ "4: sbi %2, [%1]\n" \
+ "5:\n" \
+ " .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "6: movi %0, #1\n" \
+ " j 5b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .popsection\n" \
+ : "=r" (err), "+r" (ptr), "+r" (val) \
+ : "0" (err) \
+ ); \
+ if (err) \
+ goto fault; \
+ } while (0)
+#define set_data(addr, val, len) \
+ do { \
+ if (len == 2) \
+ set16_data(addr, val); \
+ else \
+ set32_data(addr, val); \
+ } while (0)
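+
+/* Bit 31 of the big-endian instruction word is set when the first
+ * halfword encodes a 16-bit instruction (see do_unaligned_access). */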
+#define NDS32_16BIT_INSTRUCTION 0x80000000
+
+extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
+extern pte_t va_kernel_present(unsigned long addr);
+extern int va_readable(struct pt_regs *regs, unsigned long addr);
+extern int va_writable(struct pt_regs *regs, unsigned long addr);
+
+int unalign_access_mode = 0, unalign_access_debug = 0;
+
+static inline unsigned long *idx_to_addr(struct pt_regs *regs, int idx)
+{
+ /* this should be consistent with ptrace.h */
+ if (idx >= 0 && idx <= 25) /* R0-R25 */
+ return &regs->uregs[0] + idx;
+ else if (idx >= 28 && idx <= 30) /* FP, GP, LP */
+ return &regs->fp + (idx - 28);
+ else if (idx == 31) /* SP */
+ return &regs->sp;
+ else
+ return NULL; /* cause a segfault */
+}
+
+static inline unsigned long get_inst(unsigned long addr)
+{
+ return be32_to_cpu(get_unaligned((u32 *) addr));
+}
+
+static inline unsigned long sign_extend(unsigned long val, int len)
+{
+ unsigned long ret = 0;
+ unsigned char *s, *t;
+ int i = 0;
+
+ val = cpu_to_le32(val);
+
+ s = (void *)&val;
+ t = (void *)&ret;
+
+ while (i++ < len)
+ *t++ = *s++;
+
+ if (((*(t - 1)) & 0x80) && (i < 4)) {
+
+ while (i++ <= 4)
+ *t++ = 0xff;
+ }
+
+ return le32_to_cpu(ret);
+}
+
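+/*
+ * Emulate one 16-bit load/store.  The decode below sets: imm (whether
+ * an immediate offset is applied), regular (plain vs. post-update
+ * ".bi" addressing), load (load vs. store), len (access size in
+ * bytes) and the field widths of the address/target registers.
+ */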
+static inline int do_16(unsigned long inst, struct pt_regs *regs)
+{
+ int imm, regular, load, len, addr_mode, idx_mode;
+ unsigned long unaligned_addr, target_val, source_idx, target_idx,
+ shift = 0;
+ switch ((inst >> 9) & 0x3F) {
+
+ case 0x12: /* LHI333 */
+ imm = 1;
+ regular = 1;
+ load = 1;
+ len = 2;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x10: /* LWI333 */
+ imm = 1;
+ regular = 1;
+ load = 1;
+ len = 4;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x11: /* LWI333.bi */
+ imm = 1;
+ regular = 0;
+ load = 1;
+ len = 4;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x1A: /* LWI450 */
+ imm = 0;
+ regular = 1;
+ load = 1;
+ len = 4;
+ addr_mode = 5;
+ idx_mode = 4;
+ break;
+ case 0x16: /* SHI333 */
+ imm = 1;
+ regular = 1;
+ load = 0;
+ len = 2;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x14: /* SWI333 */
+ imm = 1;
+ regular = 1;
+ load = 0;
+ len = 4;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x15: /* SWI333.bi */
+ imm = 1;
+ regular = 0;
+ load = 0;
+ len = 4;
+ addr_mode = 3;
+ idx_mode = 3;
+ break;
+ case 0x1B: /* SWI450 */
+ imm = 0;
+ regular = 1;
+ load = 0;
+ len = 4;
+ addr_mode = 5;
+ idx_mode = 4;
+ break;
+
+ default:
+ return -EFAULT;
+ }
+
+ if (addr_mode == 3) {
+ unaligned_addr = *idx_to_addr(regs, RA3(inst));
+ source_idx = RA3(inst);
+ } else {
+ unaligned_addr = *idx_to_addr(regs, RA5(inst));
+ source_idx = RA5(inst);
+ }
+
+ if (idx_mode == 3)
+ target_idx = RT3(inst);
+ else
+ target_idx = RT4(inst);
+
+ if (imm)
+ shift = IMM3U(inst) * len;
+
+ if (regular)
+ unaligned_addr += shift;
+
+ if (load) {
+ if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
+ return -EACCES;
+
+ get_data(unaligned_addr, &target_val, len);
+ *idx_to_addr(regs, target_idx) = target_val;
+ } else {
+ if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
+ return -EACCES;
+ target_val = *idx_to_addr(regs, target_idx);
+ set_data((void *)unaligned_addr, target_val, len);
+ }
+
+ if (!regular)
+ *idx_to_addr(regs, source_idx) = unaligned_addr + shift;
+ regs->ipc += 2;
+
+ return 0;
+fault:
+ return -EACCES;
+}
+
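+/*
+ * Emulate one 32-bit load/store: the first switch decodes the
+ * immediate-offset forms, the fallback switch the register-offset
+ * forms (offset = rb << SV), with sign_ext marking the sign-extending
+ * halfword loads.
+ */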
+static inline int do_32(unsigned long inst, struct pt_regs *regs)
+{
+ int imm, regular, load, len, sign_ext;
+ unsigned long unaligned_addr, target_val, shift;
+
+ unaligned_addr = *idx_to_addr(regs, RA(inst));
+
+ switch ((inst >> 25) << 1) {
+
+ case 0x02: /* LHI */
+ imm = 1;
+ regular = 1;
+ load = 1;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x0A: /* LHI.bi */
+ imm = 1;
+ regular = 0;
+ load = 1;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x22: /* LHSI */
+ imm = 1;
+ regular = 1;
+ load = 1;
+ len = 2;
+ sign_ext = 1;
+ break;
+ case 0x2A: /* LHSI.bi */
+ imm = 1;
+ regular = 0;
+ load = 1;
+ len = 2;
+ sign_ext = 1;
+ break;
+ case 0x04: /* LWI */
+ imm = 1;
+ regular = 1;
+ load = 1;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x0C: /* LWI.bi */
+ imm = 1;
+ regular = 0;
+ load = 1;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x12: /* SHI */
+ imm = 1;
+ regular = 1;
+ load = 0;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x1A: /* SHI.bi */
+ imm = 1;
+ regular = 0;
+ load = 0;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x14: /* SWI */
+ imm = 1;
+ regular = 1;
+ load = 0;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x1C: /* SWI.bi */
+ imm = 1;
+ regular = 0;
+ load = 0;
+ len = 4;
+ sign_ext = 0;
+ break;
+
+ default:
+ switch (inst & 0xff) {
+
+ case 0x01: /* LH */
+ imm = 0;
+ regular = 1;
+ load = 1;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x05: /* LH.bi */
+ imm = 0;
+ regular = 0;
+ load = 1;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x11: /* LHS */
+ imm = 0;
+ regular = 1;
+ load = 1;
+ len = 2;
+ sign_ext = 1;
+ break;
+ case 0x15: /* LHS.bi */
+ imm = 0;
+ regular = 0;
+ load = 1;
+ len = 2;
+ sign_ext = 1;
+ break;
+ case 0x02: /* LW */
+ imm = 0;
+ regular = 1;
+ load = 1;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x06: /* LW.bi */
+ imm = 0;
+ regular = 0;
+ load = 1;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x09: /* SH */
+ imm = 0;
+ regular = 1;
+ load = 0;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x0D: /* SH.bi */
+ imm = 0;
+ regular = 0;
+ load = 0;
+ len = 2;
+ sign_ext = 0;
+ break;
+ case 0x0A: /* SW */
+ imm = 0;
+ regular = 1;
+ load = 0;
+ len = 4;
+ sign_ext = 0;
+ break;
+ case 0x0E: /* SW.bi */
+ imm = 0;
+ regular = 0;
+ load = 0;
+ len = 4;
+ sign_ext = 0;
+ break;
+
+ default:
+ return -EFAULT;
+ }
+ }
+
+ if (imm)
+ shift = IMM(inst) * len;
+ else
+ shift = *idx_to_addr(regs, RB(inst)) << SV(inst);
+
+ if (regular)
+ unaligned_addr += shift;
+
+ if (load) {
+
+ if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
+ return -EACCES;
+
+ get_data(unaligned_addr, &target_val, len);
+
+ if (sign_ext)
+ *idx_to_addr(regs, RT(inst)) =
+ sign_extend(target_val, len);
+ else
+ *idx_to_addr(regs, RT(inst)) = target_val;
+ } else {
+
+ if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
+ return -EACCES;
+
+ target_val = *idx_to_addr(regs, RT(inst));
+ set_data((void *)unaligned_addr, target_val, len);
+ }
+
+ if (!regular)
+ *idx_to_addr(regs, RA(inst)) = unaligned_addr + shift;
+
+ regs->ipc += 4;
+
+ return 0;
+fault:
+ return -EACCES;
+}
+
+int do_unaligned_access(unsigned long addr, struct pt_regs *regs)
+{
+ unsigned long inst;
+ int ret = -EFAULT;
+ mm_segment_t seg = get_fs();
+
+ inst = get_inst(regs->ipc);
+
+ DEBUG((unalign_access_debug > 0), 1,
+ "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr,
+ regs->ipc, inst);
+
+ set_fs(USER_DS);
+
+ if (inst & NDS32_16BIT_INSTRUCTION)
+ ret = do_16((inst >> 16) & 0xffff, regs);
+ else
+ ret = do_32(inst, regs);
+ set_fs(seg);
+
+ return ret;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static struct ctl_table alignment_tbl[3] = {
+ {
+ .procname = "enable",
+ .data = &unalign_access_mode,
+ .maxlen = sizeof(unalign_access_mode),
+ .mode = 0666,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .procname = "debug_info",
+ .data = &unalign_access_debug,
+ .maxlen = sizeof(unalign_access_debug),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {}
+};
+
+static struct ctl_table nds32_sysctl_table[2] = {
+ {
+ .procname = "unaligned_acess",
+ .mode = 0555,
+ .child = alignment_tbl},
+ {}
+};
+
+static struct ctl_path nds32_path[2] = {
+ {.procname = "nds32"},
+ {}
+};
+
+/*
+ * Initialize nds32 alignment-correction interface
+ */
+static int __init nds32_sysctl_init(void)
+{
+ register_sysctl_paths(nds32_path, nds32_sysctl_table);
+ return 0;
+}
+
+__initcall(nds32_sysctl_init);
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
new file mode 100644
index 000000000000..6eb786a399a2
--- /dev/null
+++ b/arch/nds32/mm/cacheflush.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/proc-fns.h>
+#include <asm/shmparam.h>
+#include <asm/cache_info.h>
+
+extern struct cache_info L1_cache_info[2];
+
+#ifndef CONFIG_CPU_CACHE_ALIASING
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t * pte)
+{
+ struct page *page;
+ unsigned long pfn = pte_pfn(*pte);
+ unsigned long flags;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ if (vma->vm_mm == current->active_mm) {
+ local_irq_save(flags);
+ __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
+ __nds32__tlbop_rwr(*pte);
+ __nds32__isb();
+ local_irq_restore(flags);
+ }
+ page = pfn_to_page(pfn);
+
+ if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
+ (vma->vm_flags & VM_EXEC)) {
+
+ if (!PageHighMem(page)) {
+ cpu_cache_wbinval_page((unsigned long)
+ page_address(page),
+ vma->vm_flags & VM_EXEC);
+ } else {
+ unsigned long kaddr = (unsigned long)kmap_atomic(page);
+ cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+ kunmap_atomic((void *)kaddr);
+ }
+ }
+}
+#else
+extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
+
+static inline unsigned long aliasing(unsigned long addr, unsigned long page)
+{
+ return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
+}
+
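+/*
+ * kremap0/kremap1 install a locked TLB entry mapping the physical
+ * page at a kernel address with the same cache colour as the user
+ * address, so the aliasing D-cache only ever sees one alias;
+ * kunmap01 unlocks and invalidates that entry again.
+ */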
+static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
+{
+ unsigned long kaddr, pte;
+
+#define BASE_ADDR0 0xffffc000
+ kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
+ pte = (pa | PAGE_KERNEL);
+ __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
+ __nds32__tlbop_rwlk(pte);
+ __nds32__isb();
+ return kaddr;
+}
+
+static inline void kunmap01(unsigned long kaddr)
+{
+ __nds32__tlbop_unlk(kaddr);
+ __nds32__tlbop_inv(kaddr);
+ __nds32__isb();
+}
+
+static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
+{
+ unsigned long kaddr, pte;
+
+#define BASE_ADDR1 0xffff8000
+ kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
+ pte = (pa | PAGE_KERNEL);
+ __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
+ __nds32__tlbop_rwlk(pte);
+ __nds32__isb();
+ return kaddr;
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cpu_dcache_wbinval_all();
+ cpu_icache_inval_all();
+ local_irq_restore(flags);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ if ((end - start) > 8 * PAGE_SIZE) {
+ cpu_dcache_wbinval_all();
+ if (vma->vm_flags & VM_EXEC)
+ cpu_icache_inval_all();
+ return;
+ }
+ local_irq_save(flags);
+ while (start < end) {
+ if (va_present(vma->vm_mm, start))
+ cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
+ start += PAGE_SIZE;
+ }
+ local_irq_restore(flags);
+ return;
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ unsigned long vto, flags;
+
+ local_irq_save(flags);
+ vto = kremap0(addr, pfn << PAGE_SHIFT);
+ cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
+ kunmap01(vto);
+ local_irq_restore(flags);
+}
+
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ cpu_dcache_wbinval_all();
+ cpu_icache_inval_all();
+}
+
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ cpu_dcache_wbinval_all();
+ cpu_icache_inval_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
+ kto = ((unsigned long)page_address(to) & PAGE_MASK);
+ kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
+ pto = page_to_phys(to);
+ pfrom = page_to_phys(from);
+
+ if (aliasing(vaddr, (unsigned long)kfrom))
+ cpu_dcache_wb_page((unsigned long)kfrom);
+ if (aliasing(vaddr, (unsigned long)kto))
+ cpu_dcache_inval_page((unsigned long)kto);
+ local_irq_save(flags);
+ vto = kremap0(vaddr, pto);
+ vfrom = kremap1(vaddr, pfrom);
+ copy_page((void *)vto, (void *)vfrom);
+ kunmap01(vfrom);
+ kunmap01(vto);
+ local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ unsigned long vto, flags, kto;
+
+ kto = ((unsigned long)page_address(page) & PAGE_MASK);
+
+ local_irq_save(flags);
+ if (aliasing(kto, vaddr) && kto != 0) {
+ cpu_dcache_inval_page(kto);
+ cpu_icache_inval_page(kto);
+ }
+ vto = kremap0(vaddr, page_to_phys(page));
+ clear_page((void *)vto);
+ kunmap01(vto);
+ local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(clear_user_highpage);
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ mapping = page_mapping(page);
+ if (mapping && !mapping_mapped(mapping))
+ set_bit(PG_dcache_dirty, &page->flags);
+ else {
+ int i, pc;
+ unsigned long vto, kaddr, flags;
+ kaddr = (unsigned long)page_address(page);
+ cpu_dcache_wbinval_page(kaddr);
+ pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
+ local_irq_save(flags);
+ for (i = 0; i < pc; i++) {
+ vto = kremap0(kaddr + i * PAGE_SIZE,
+ page_to_phys(page));
+ cpu_dcache_wbinval_page(vto);
+ kunmap01(vto);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ unsigned long line_size, start, end, vto, flags;
+
+ local_irq_save(flags);
+ vto = kremap0(vaddr, page_to_phys(page));
+ dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = (unsigned long)dst & ~(line_size - 1);
+ end = ((unsigned long)dst + len + line_size - 1) &
+ ~(line_size - 1);
+ cpu_cache_wbinval_range(start, end, 1);
+ }
+ kunmap01(vto);
+ local_irq_restore(flags);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ unsigned long vto, flags;
+
+ local_irq_save(flags);
+ vto = kremap0(vaddr, page_to_phys(page));
+ src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
+ memcpy(dst, src, len);
+ kunmap01(vto);
+ local_irq_restore(flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr)
+{
+ unsigned long flags;
+ if (!PageAnon(page))
+ return;
+
+ if (vma->vm_mm != current->active_mm)
+ return;
+
+ local_irq_save(flags);
+ if (vma->vm_flags & VM_EXEC)
+ cpu_icache_inval_page(vaddr & PAGE_MASK);
+ cpu_dcache_wbinval_page((unsigned long)page_address(page));
+ local_irq_restore(flags);
+}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ cpu_dcache_wbinval_page((unsigned long)page_address(page));
+ local_irq_restore(flags);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size, flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & ~(line_size - 1);
+ end = (end + line_size - 1) & ~(line_size - 1);
+ local_irq_save(flags);
+ cpu_cache_wbinval_range(start, end, 1);
+ local_irq_restore(flags);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ cpu_cache_wbinval_page((unsigned long)page_address(page),
+ vma->vm_flags & VM_EXEC);
+ local_irq_restore(flags);
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t * pte)
+{
+ struct page *page;
+ unsigned long flags;
+ unsigned long pfn = pte_pfn(*pte);
+
+ if (!pfn_valid(pfn))
+ return;
+
+ if (vma->vm_mm == current->active_mm) {
+ local_irq_save(flags);
+ __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
+ __nds32__tlbop_rwr(*pte);
+ __nds32__isb();
+ local_irq_restore(flags);
+ }
+
+ page = pfn_to_page(pfn);
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
+ (vma->vm_flags & VM_EXEC)) {
+ local_irq_save(flags);
+ cpu_dcache_wbinval_page((unsigned long)page_address(page));
+ local_irq_restore(flags);
+ }
+}
+#endif
diff --git a/arch/nds32/mm/extable.c b/arch/nds32/mm/extable.c
new file mode 100644
index 000000000000..db7f0a7c8966
--- /dev/null
+++ b/arch/nds32/mm/extable.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup)
+ regs->ipc = fixup->fixup;
+
+ return fixup != NULL;
+}
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
new file mode 100644
index 000000000000..3a246fb8098c
--- /dev/null
+++ b/arch/nds32/mm/fault.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+extern void die(const char *str, struct pt_regs *regs, long err);
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ if (!mm)
+ mm = &init_mm;
+
+ pr_alert("pgd = %p\n", mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+
+ do {
+ pmd_t *pmd;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ pr_alert("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset(pgd, addr);
+#if PTRS_PER_PMD != 1
+ pr_alert(", *pmd=%08lx", pmd_val(*pmd));
+#endif
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ pr_alert("(bad)");
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ {
+ pte_t *pte;
+ /* We must not map this if we have highmem enabled */
+ pte = pte_offset_map(pmd, addr);
+ pr_alert(", *pte=%08lx", pte_val(*pte));
+ pte_unmap(pte);
+ }
+ } while (0);
+
+ pr_alert("\n");
+}
+
+void do_page_fault(unsigned long entry, unsigned long addr,
+ unsigned int error_code, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ siginfo_t info;
+ int fault;
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
+ tsk = current;
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (addr >= TASK_SIZE) {
+ if (user_mode(regs))
+ goto bad_area_nosemaphore;
+
+ if (addr >= TASK_SIZE && addr < VMALLOC_END
+ && (entry == ENTRY_PTE_NOT_PRESENT))
+ goto vmalloc_fault;
+ else
+ goto no_context;
+ }
+
+ /* Send a signal to the task for handling the unaligned access. */
+ if (entry == ENTRY_GENERAL_EXCPETION
+ && error_code == ETYPE_ALIGNMENT_CHECK) {
+ if (user_mode(regs))
+ goto bad_area_nosemaphore;
+ else
+ goto no_context;
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (unlikely(faulthandler_disabled() || !mm))
+ goto no_context;
+
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this is from code which shouldn't.
+ */
+ if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (!user_mode(regs) &&
+ !search_exception_tables(instruction_pointer(regs)))
+ goto no_context;
+retry:
+ down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in which
+ * case, we'll have missed the might_sleep() from down_read().
+ */
+ might_sleep();
+ if (IS_ENABLED(CONFIG_DEBUG_VM)) {
+ if (!user_mode(regs) &&
+ !search_exception_tables(instruction_pointer(regs)))
+ goto no_context;
+ }
+ }
+
+ vma = find_vma(mm, addr);
+
+ if (unlikely(!vma))
+ goto bad_area;
+
+ if (vma->vm_start <= addr)
+ goto good_area;
+
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+ goto bad_area;
+
+ if (unlikely(expand_stack(vma, addr)))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* first do some preliminary protection checks */
+ if (entry == ENTRY_PTE_NOT_PRESENT) {
+ if (error_code & ITYPE_mskINST)
+ mask = VM_EXEC;
+ else {
+ mask = VM_READ | VM_WRITE;
+ if (vma->vm_flags & VM_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+ }
+ } else if (entry == ENTRY_TLB_MISC) {
+ switch (error_code & ITYPE_mskETYPE) {
+ case RD_PROT:
+ mask = VM_READ;
+ break;
+ case WRT_PROT:
+ mask = VM_WRITE;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ case NOEXEC:
+ mask = VM_EXEC;
+ break;
+ case PAGE_MODIFY:
+ mask = VM_WRITE;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ case ACC_BIT:
+ BUG();
+ default:
+ break;
+ }
+
+ }
+ if (!(vma->vm_flags & mask))
+ goto bad_area;
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+
+ fault = handle_mm_fault(vma, addr, flags);
+
+ /*
+ * If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
+ */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ else
+ goto bad_area;
+ }
+
+ /*
+ * Major/minor page fault accounting is only done on the initial
+ * attempt. If we go through a retry, it is extremely likely that the
+ * page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+ /* User mode accesses just cause a SIGSEGV */
+
+ if (user_mode(regs)) {
+ tsk->thread.address = addr;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = entry;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)addr;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+ * when it accesses user memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+
+ {
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(instruction_pointer(regs));
+ if (entry) {
+ /* Adjust the instruction pointer in the stackframe */
+ instruction_pointer(regs) = entry->fixup;
+ return;
+ }
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+
+ show_pte(mm, addr);
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+
+ return;
+
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+
+ /*
+ * Send a sigbus
+ */
+ tsk->thread.address = addr;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = entry;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)addr;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Use current_pgd instead of tsk->active_mm->pgd
+ * since the latter might be unavailable if this
+ * code is executed in an inopportunely timed irq
+ * (like inside schedule() between switch_mm and
+ * switch_to...).
+ */
+
+ unsigned int index = pgd_index(addr);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+
+ pud = pud_offset(pgd, addr);
+ pud_k = pud_offset(pgd_k, addr);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
+ else
+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+
+ /*
+ * Since the vmalloc area is global, we don't
+ * need to copy individual PTE's, it is enough to
+ * copy the pgd pointer into the pte page of the
+ * root task. If that is there, we'll find our pte if
+ * it exists.
+ */
+
+ /* Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * address. If we don't do this, it will just
+ * silently loop forever.
+ */
+
+ pte_k = pte_offset_kernel(pmd_k, addr);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
+ return;
+ }
+}
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
new file mode 100644
index 000000000000..e17cb8a69315
--- /dev/null
+++ b/arch/nds32/mm/highmem.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+void *kmap(struct page *page)
+{
+ unsigned long vaddr;
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ vaddr = (unsigned long)kmap_high(page);
+ return (void *)vaddr;
+}
+
+EXPORT_SYMBOL(kmap);
+
+void kunmap(struct page *page)
+{
+ BUG_ON(in_interrupt());
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+EXPORT_SYMBOL(kunmap);
+
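+/*
+ * Map a highmem page at this CPU's kmap fixmap slot; the PTE and the
+ * TLB entry are written by hand so no fault is taken while atomic.
+ */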
+void *kmap_atomic(struct page *page)
+{
+ unsigned int idx;
+ unsigned long vaddr, pte;
+ int type;
+ pte_t *ptep;
+
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL);
+ ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ set_pte(ptep, pte);
+
+ __nds32__tlbop_inv(vaddr);
+ __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN);
+ __nds32__tlbop_rwr(pte);
+ __nds32__isb();
+ return (void *)vaddr;
+}
+
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ if (kvaddr >= (void *)FIXADDR_START) {
+ unsigned long vaddr = (unsigned long)kvaddr;
+ pte_t *ptep;
+ kmap_atomic_idx_pop();
+ __nds32__tlbop_inv(vaddr);
+ __nds32__isb();
+ ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ set_pte(ptep, 0);
+ }
+ pagefault_enable();
+ preempt_enable();
+}
+
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
new file mode 100644
index 000000000000..93ee0160720b
--- /dev/null
+++ b/arch/nds32/mm/init.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 1995-2005 Russell King
+// Copyright (C) 2012 ARM Ltd.
+// Copyright (C) 2013-2017 Andes Technology Corporation
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/tlb.h>
+#include <asm/page.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+DEFINE_SPINLOCK(anon_alias_lock);
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern unsigned long phys_initrd_start;
+extern unsigned long phys_initrd_size;
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ /* Clear the zone sizes */
+ memset(zones_size, 0, sizeof(zones_size));
+
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = max_pfn;
+#endif
+ free_area_init(zones_size);
+}
+
+/*
+ * Map all physical memory under high_memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+ unsigned long v, p, e;
+ pgd_t *pge;
+ pud_t *pue;
+ pmd_t *pme;
+ pte_t *pte;
+ /* These mark extents of read-only kernel pages...
+ * ...from vmlinux.lds.S
+ */
+
+ p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
+ e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));
+
+ v = (u32) __va(p);
+ pge = pgd_offset_k(v);
+
+ while (p < e) {
+ int j;
+ pue = pud_offset(pge, v);
+ pme = pmd_offset(pue, v);
+
+ if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+ panic("%s: Kernel hardcoded for "
+ "two-level page tables", __func__);
+ }
+
+ /* Alloc one page for holding PTE's... */
+ pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ memset(pte, 0, PAGE_SIZE);
+ set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
+
+ /* Fill the newly allocated page with PTE'S */
+ for (j = 0; p < e && j < PTRS_PER_PTE;
+ v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+ /* Create mapping between p and v. */
+ /* TODO: more fine grant for page access permission */
+ set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
+ }
+
+ pge++;
+ }
+}
+
+static pmd_t *fixmap_pmd_p;
+static void __init fixedrange_init(void)
+{
+ unsigned long vaddr;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+#ifdef CONFIG_HIGHMEM
+ pte_t *pte;
+#endif /* CONFIG_HIGHMEM */
+
+ /*
+ * Fixed mappings:
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ memset(fixmap_pmd_p, 0, PAGE_SIZE);
+ set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
+
+#ifdef CONFIG_HIGHMEM
+ /*
+ * Permanent kmaps:
+ */
+ vaddr = PKMAP_BASE;
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+ pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ memset(pte, 0, PAGE_SIZE);
+ set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
+ pkmap_page_table = pte;
+#endif /* CONFIG_HIGHMEM */
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(void)
+{
+ int i;
+ void *zero_page;
+
+ pr_info("Setting up paging and PTEs.\n");
+ /* clear out the init_mm.pgd that will contain the kernel's mappings */
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i] = __pgd(1);
+
+ map_ram();
+
+ fixedrange_init();
+
+ /* allocate space for empty_zero_page */
+ zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+ memset(zero_page, 0, PAGE_SIZE);
+ zone_sizes_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ flush_dcache_page(empty_zero_page);
+}
+
+static inline void __init free_highmem(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long pfn;
+ for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
+ phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;
+ if (!memblock_is_reserved(paddr))
+ free_highmem_page(pfn_to_page(pfn));
+ }
+#endif
+}
+
+static void __init set_max_mapnr_init(void)
+{
+ max_mapnr = max_pfn;
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ phys_addr_t memory_start = memblock_start_of_DRAM();
+ BUG_ON(!mem_map);
+ set_max_mapnr_init();
+
+ free_highmem();
+
+ /* this will put all low memory onto the freelists */
+ free_all_bootmem();
+ mem_init_print_info(NULL);
+
+ pr_info("virtual kernel memory layout:\n"
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+#endif
+ " consist : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
+ FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10,
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP * PAGE_SIZE) >> 10,
+#endif
+ CONSISTENT_BASE, CONSISTENT_END,
+ ((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START,
+ (unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20,
+ (unsigned long)__va(memory_start), (unsigned long)high_memory,
+ ((unsigned long)high_memory -
+ (unsigned long)__va(memory_start)) >> 20,
+ (unsigned long)&__init_begin, (unsigned long)&__init_end,
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext,
+ (unsigned long)&_edata,
+ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+ (unsigned long)&_text, (unsigned long)&_etext,
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
+ * Check boundaries twice: Some fundamental inconsistencies can
+ * be detected at build time already.
+ */
+#ifdef CONFIG_HIGHMEM
+ BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
+ BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE);
+#endif
+ BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+ BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
+
+#ifdef CONFIG_HIGHMEM
+ BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
+ BUG_ON(CONSISTENT_END > PKMAP_BASE);
+#endif
+ BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+ BUG_ON(VMALLOC_START >= VMALLOC_END);
+ BUG_ON((unsigned long)high_memory > VMALLOC_START);
+
+ return;
+}
+
+void free_initmem(void)
+{
+ free_initmem_default(-1);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];;
+
+ if (pgprot_val(flags)) {
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ } else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ }
+}
diff --git a/arch/nds32/mm/ioremap.c b/arch/nds32/mm/ioremap.c
new file mode 100644
index 000000000000..690140bb23a2
--- /dev/null
+++ b/arch/nds32/mm/ioremap.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
+
+static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
+ void *caller)
+{
+ struct vm_struct *area;
+ unsigned long addr, offset, last_addr;
+ pgprot_t prot;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys_addr;
+ addr = (unsigned long)area->addr;
+ prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D |
+ _PAGE_G | _PAGE_C_DEV);
+ if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+ return (__force void __iomem *)(offset + (char *)addr);
+}
+
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
+{
+ return __ioremap_caller(phys_addr, size,
+ __builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(volatile void __iomem * addr)
+{
+ vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/nds32/mm/mm-nds32.c b/arch/nds32/mm/mm-nds32.c
new file mode 100644
index 000000000000..3b43798d754f
--- /dev/null
+++ b/arch/nds32/mm/mm-nds32.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/init_task.h>
+#include <asm/pgalloc.h>
+
+#define FIRST_KERNEL_PGD_NR (USER_PTRS_PER_PGD)
+
+/*
+ * need to get a page for level 1
+ */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *new_pgd, *init_pgd;
+ int i;
+
+ new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0);
+ if (!new_pgd)
+ return NULL;
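+ /* 1 is this port's not-present marker (cf. paging_init()), so
+ * every entry starts out invalid before the kernel range is
+ * copied in. */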
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ (*new_pgd) = 1;
+ new_pgd++;
+ }
+ new_pgd -= PTRS_PER_PGD;
+
+ init_pgd = pgd_offset_k(0);
+
+ memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+ (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+ cpu_dcache_wb_range((unsigned long)new_pgd,
+ (unsigned long)new_pgd +
+ PTRS_PER_PGD * sizeof(pgd_t));
+ inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
+ NR_PAGETABLE);
+
+ return new_pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t * pgd)
+{
+ pmd_t *pmd;
+ struct page *pte;
+
+ if (!pgd)
+ return;
+
+ pmd = (pmd_t *) pgd;
+ if (pmd_none(*pmd))
+ goto free;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ goto free;
+ }
+
+ pte = pmd_page(*pmd);
+ pmd_clear(pmd);
+ dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
+ pte_free(mm, pte);
+ mm_dec_nr_ptes(mm);
+ pmd_free(mm, pmd);
+free:
+ free_pages((unsigned long)pgd, 0);
+}
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages. This will then ensure that we have predictable
+ * results when turning the mmu off
+ */
+void setup_mm_for_reboot(char mode)
+{
+ unsigned long pmdval;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ int i;
+
+ if (current->mm && current->mm->pgd)
+ pgd = current->mm->pgd;
+ else
+ pgd = init_mm.pgd;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ pmdval = (i << PGDIR_SHIFT);
+ pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+ set_pmd(pmd, __pmd(pmdval));
+ }
+}
diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c
new file mode 100644
index 000000000000..c206b31ce07a
--- /dev/null
+++ b/arch/nds32/mm/mmap.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+
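+/* Round addr up to an SHMLBA boundary, then add the cache colour
+ * implied by pgoff so shared mappings agree in the VIPT caches. */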
+#define COLOUR_ALIGN(addr,pgoff) \
+ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
+ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+/*
+ * We need to ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches. We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ *
+ * We unconditionally provide this function for all cases, however
+ * in the VIVT case, we optimise out the alignment rules.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ struct vm_unmapped_area_info info;
+ int aliasing = 0;
+ if (IS_ENABLED(CONFIG_CPU_CACHE_ALIASING))
+ aliasing = 1;
+
+ /*
+ * We only need to do colour alignment if either the I or D
+ * caches alias.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ /*
+ * We enforce the MAP_FIXED case.
+ */
+ if (flags & MAP_FIXED) {
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
new file mode 100644
index 000000000000..ba80992d13a2
--- /dev/null
+++ b/arch/nds32/mm/proc.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/nds32.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/l2_cache.h>
+#include <nds32_intrinsic.h>
+
+#include <asm/cache_info.h>
+extern struct cache_info L1_cache_info[2];
+
+int va_kernel_present(unsigned long addr)
+{
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pmd = pmd_offset(pgd_offset_k(addr), addr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte;
+ }
+ return 0;
+}
+
+pte_t va_present(struct mm_struct * mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_none(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte;
+ }
+ }
+ }
+ return 0;
+}
+
+int va_readable(struct pt_regs *regs, unsigned long addr)
+{
+ struct mm_struct *mm = current->mm;
+ pte_t pte;
+ int ret = 0;
+
+ if (user_mode(regs)) {
+ /* user mode */
+ pte = va_present(mm, addr);
+ if (!pte && pte_read(pte))
+ ret = 1;
+ } else {
+ /* superuser mode is always readable, so we only
+ * need to check whether it is present */
+ return !!va_kernel_present(addr);
+ }
+ return ret;
+}
+
+int va_writable(struct pt_regs *regs, unsigned long addr)
+{
+ struct mm_struct *mm = current->mm;
+ pte_t pte;
+ int ret = 0;
+
+ if (user_mode(regs)) {
+ /* user mode */
+ pte = va_present(mm, addr);
+ if (!pte && pte_write(pte))
+ ret = 1;
+ } else {
+ /* superuser mode */
+ pte = va_kernel_present(addr);
+ if (!pte && pte_kernel_write(pte))
+ ret = 1;
+ }
+ return ret;
+}
+
+/*
+ * All
+ */
+void cpu_icache_inval_all(void)
+{
+ unsigned long end, line_size;
+
+ line_size = L1_cache_info[ICACHE].line_size;
+ end = line_size * L1_cache_info[ICACHE].ways *
+ L1_cache_info[ICACHE].sets;
+
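+ /* Invalidate the I-cache by index, four lines per iteration. */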
+ do {
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+ } while (end > 0);
+ __nds32__isb();
+}
+
+void cpu_dcache_inval_all(void)
+{
+ __nds32__cctl_l1d_invalall();
+}
+
+#ifdef CONFIG_CACHE_L2
+void dcache_wb_all_level(void)
+{
+ unsigned long flags, cmd;
+ local_irq_save(flags);
+ __nds32__cctl_l1d_wball_alvl();
+ /* Section 1: Ensure the section 2 & 3 program code execution after */
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+
+ /* Section 2: Confirm the writeback all level is done in CPU and L2C */
+ cmd = CCTL_CMD_L2_SYNC;
+ L2_CMD_RDY();
+ L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+ L2_CMD_RDY();
+
+ /* Section 3: Writeback whole L2 cache */
+ cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
+ L2_CMD_RDY();
+ L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+ L2_CMD_RDY();
+ __nds32__msync_all();
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(dcache_wb_all_level);
+#endif
+
+void cpu_dcache_wb_all(void)
+{
+ __nds32__cctl_l1d_wball_one_lvl();
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_dcache_wbinval_all(void)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ unsigned long flags;
+ local_irq_save(flags);
+#endif
+ cpu_dcache_wb_all();
+ cpu_dcache_inval_all();
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Page
+ */
+void cpu_icache_inval_page(unsigned long start)
+{
+ unsigned long line_size, end;
+
+ line_size = L1_cache_info[ICACHE].line_size;
+ end = start + PAGE_SIZE;
+
+ do {
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+ } while (end != start);
+ __nds32__isb();
+}
+
+void cpu_dcache_inval_page(unsigned long start)
+{
+ unsigned long line_size, end;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+ end = start + PAGE_SIZE;
+
+ do {
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ } while (end != start);
+}
+
+void cpu_dcache_wb_page(unsigned long start)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ unsigned long line_size, end;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+ end = start + PAGE_SIZE;
+
+ do {
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+ end -= line_size;
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+ } while (end != start);
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+#endif
+}
+
+void cpu_dcache_wbinval_page(unsigned long start)
+{
+ unsigned long line_size, end;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+ end = start + PAGE_SIZE;
+
+ do {
+ end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+ } while (end != start);
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_cache_wbinval_page(unsigned long page, int flushi)
+{
+ cpu_dcache_wbinval_page(page);
+ if (flushi)
+ cpu_icache_inval_page(page);
+}
+
+/*
+ * Range
+ */
+void cpu_icache_inval_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+
+ line_size = L1_cache_info[ICACHE].line_size;
+
+ while (end > start) {
+ __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
+ start += line_size;
+ }
+ __nds32__isb();
+}
+
+void cpu_dcache_inval_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+
+ while (end > start) {
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
+ start += line_size;
+ }
+}
+
+void cpu_dcache_wb_range(unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ unsigned long line_size;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+
+ while (end > start) {
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
+ start += line_size;
+ }
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+#endif
+}
+
+void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+
+ while (end > start) {
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
+#endif
+ __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
+ start += line_size;
+ }
+ __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
+{
+ unsigned long line_size, align_start, align_end;
+
+ line_size = L1_cache_info[DCACHE].line_size;
+ align_start = start & ~(line_size - 1);
+ align_end = (end + line_size - 1) & ~(line_size - 1);
+ cpu_dcache_wbinval_range(align_start, align_end);
+
+ if (flushi) {
+ line_size = L1_cache_info[ICACHE].line_size;
+ align_start = start & ~(line_size - 1);
+ align_end = (end + line_size - 1) & ~(line_size - 1);
+ cpu_icache_inval_range(align_start, align_end);
+ }
+}
+
+void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ bool flushi, bool wbd)
+{
+ unsigned long line_size, t_start, t_end;
+
+ if (!flushi && !wbd)
+ return;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & ~(line_size - 1);
+ end = (end + line_size - 1) & ~(line_size - 1);
+
+ if ((end - start) > (8 * PAGE_SIZE)) {
+ if (wbd)
+ cpu_dcache_wbinval_all();
+ if (flushi)
+ cpu_icache_inval_all();
+ return;
+ }
+
+ t_start = (start + PAGE_SIZE) & PAGE_MASK;
+ t_end = ((end - 1) & PAGE_MASK);
+
+ if ((start & PAGE_MASK) == t_end) {
+ if (va_present(vma->vm_mm, start)) {
+ if (wbd)
+ cpu_dcache_wbinval_range(start, end);
+ if (flushi)
+ cpu_icache_inval_range(start, end);
+ }
+ return;
+ }
+
+ if (va_present(vma->vm_mm, start)) {
+ if (wbd)
+ cpu_dcache_wbinval_range(start, t_start);
+ if (flushi)
+ cpu_icache_inval_range(start, t_start);
+ }
+
+ if (va_present(vma->vm_mm, end - 1)) {
+ if (wbd)
+ cpu_dcache_wbinval_range(t_end, end);
+ if (flushi)
+ cpu_icache_inval_range(t_end, end);
+ }
+
+ while (t_start < t_end) {
+ if (va_present(vma->vm_mm, t_start)) {
+ if (wbd)
+ cpu_dcache_wbinval_page(t_start);
+ if (flushi)
+ cpu_icache_inval_page(t_start);
+ }
+ t_start += PAGE_SIZE;
+ }
+}
+
+#ifdef CONFIG_CACHE_L2
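+/* Issue one L2 CCTL command per cache line over [start, end), then a
+ * final L2 sync; a no-op when the L2 controller (atl2c_base) is
+ * absent. */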
+static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
+{
+ if (atl2c_base) {
+ unsigned long p_start = __pa(start);
+ unsigned long p_end = __pa(end);
+ unsigned long cmd;
+ unsigned long line_size;
+ /* TODO Can Use PAGE Mode to optimize if range large than PAGE_SIZE */
+ line_size = L2_CACHE_LINE_SIZE();
+ p_start = p_start & (~(line_size - 1));
+ p_end = (p_end + line_size - 1) & (~(line_size - 1));
+ cmd = (p_start & ~(line_size - 1)) | op |
+ CCTL_SINGLE_CMD;
+ do {
+ L2_CMD_RDY();
+ L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+ cmd += line_size;
+ p_start += line_size;
+ } while (p_end > p_start);
+ cmd = CCTL_CMD_L2_SYNC;
+ L2_CMD_RDY();
+ L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+ L2_CMD_RDY();
+ }
+}
+#else
+#define cpu_l2cache_op(start,end,op) do { } while (0)
+#endif
+/*
+ * DMA
+ */
+void cpu_dma_wb_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+ unsigned long flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & (~(line_size - 1));
+ end = (end + line_size - 1) & (~(line_size - 1));
+ if (unlikely(start == end))
+ return;
+
+ local_irq_save(flags);
+ cpu_dcache_wb_range(start, end);
+ cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
+ __nds32__msync_all();
+ local_irq_restore(flags);
+}
+
+void cpu_dma_inval_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+ unsigned long old_start = start;
+ unsigned long old_end = end;
+ unsigned long flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & (~(line_size - 1));
+ end = (end + line_size - 1) & (~(line_size - 1));
+ if (unlikely(start == end))
+ return;
+ local_irq_save(flags);
+ if (start != old_start) {
+ cpu_dcache_wbinval_range(start, start + line_size);
+ cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
+ }
+ if (end != old_end) {
+ cpu_dcache_wbinval_range(end - line_size, end);
+ cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
+ }
+ cpu_dcache_inval_range(start, end);
+ cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
+ __nds32__msync_all();
+ local_irq_restore(flags);
+}
+
+void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size;
+ unsigned long flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & (~(line_size - 1));
+ end = (end + line_size - 1) & (~(line_size - 1));
+ if (unlikely(start == end))
+ return;
+
+ local_irq_save(flags);
+ cpu_dcache_wbinval_range(start, end);
+ cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
+ __nds32__msync_all();
+ local_irq_restore(flags);
+}
+
+void cpu_proc_init(void)
+{
+}
+
+void cpu_proc_fin(void)
+{
+}
+
+void cpu_do_idle(void)
+{
+ __nds32__standby_no_wake_grant();
+}
+
+void cpu_reset(unsigned long reset)
+{
+ u32 tmp;
+ GIE_DISABLE();
+ tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
+ tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
+ __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
+ cpu_dcache_wbinval_all();
+ cpu_icache_inval_all();
+
+ __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
+}
+
+void cpu_switch_mm(struct mm_struct *mm)
+{
+ unsigned long cid;
+ cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+ cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
+ __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
+ __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
+}
diff --git a/arch/nds32/mm/tlb.c b/arch/nds32/mm/tlb.c
new file mode 100644
index 000000000000..dd41f5e0712f
--- /dev/null
+++ b/arch/nds32/mm/tlb.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/spinlock_types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/nds32.h>
+#include <nds32_intrinsic.h>
+
+unsigned int cpu_last_cid = TLB_MISC_mskCID + (2 << TLB_MISC_offCID);
+
+DEFINE_SPINLOCK(cid_lock);
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long flags, ocid, ncid;
+
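+ /* A full TLB flush is cheaper than per-page invalidation for ranges over 4 MiB. */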
+ if ((end - start) > 0x400000) {
+ __nds32__tlbop_flua();
+ __nds32__isb();
+ return;
+ }
+
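+ /*
+ * Temporarily switch the CID in TLB_MISC to the target mm's context
+ * so that the per-page invalidations apply to that address space.
+ */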
+ spin_lock_irqsave(&cid_lock, flags);
+ ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+ ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
+ __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
+ while (start < end) {
+ __nds32__tlbop_inv(start);
+ __nds32__isb();
+ start += PAGE_SIZE;
+ }
+ __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
+ spin_unlock_irqrestore(&cid_lock, flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long flags, ocid, ncid;
+
+ spin_lock_irqsave(&cid_lock, flags);
+ ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+ ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
+ __nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
+ __nds32__tlbop_inv(addr);
+ __nds32__isb();
+ __nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
+ spin_unlock_irqrestore(&cid_lock, flags);
+}
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index ce072ba0f8dd..9010243077ab 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -45,6 +45,7 @@ static inline void iounmap(void __iomem *addr)
__iounmap(addr);
}
+#define ioremap_nocache ioremap_nocache
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index 7c691399da3f..6709b28a0221 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -29,13 +29,14 @@
#define PIO_OFFSET 0
#define PIO_MASK 0
+#define ioremap_nocache ioremap_nocache
#include <asm-generic/io.h>
#include <asm/pgtable.h>
extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
pgprot_t prot);
-static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
return __ioremap(offset, size, PAGE_KERNEL);
}
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index cd51a89b393c..df2dc1784673 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -127,12 +127,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst,
* Bus number may be embedded in the higher bits of the physical address.
* This is why we have no bus number argument to ioremap().
*/
-void __iomem *ioremap(unsigned long offset, unsigned long size);
-#define ioremap_nocache(X,Y) ioremap((X),(Y))
-#define ioremap_wc(X,Y) ioremap((X),(Y))
-#define ioremap_wt(X,Y) ioremap((X),(Y))
void iounmap(volatile void __iomem *addr);
-
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
void ioport_unmap(void __iomem *);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 7eeef80c02f7..3bcef9ce74df 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -122,12 +122,12 @@ static void xres_free(struct xresource *xrp) {
*
* Bus type is always zero on IIep.
*/
-void __iomem *ioremap(unsigned long offset, unsigned long size)
+void __iomem *ioremap(phys_addr_t offset, size_t size)
{
char name[14];
sprintf(name, "phys_%08x", (u32)offset);
- return _sparc_alloc_io(0, offset, size, name);
+ return _sparc_alloc_io(0, (unsigned long)offset, size, name);
}
EXPORT_SYMBOL(ioremap);
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index c38e5a732d86..acc5bb2cf1c7 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -52,6 +52,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
return xtensa_ioremap_cache(offset, size);
}
#define ioremap_cache ioremap_cache
+#define ioremap_nocache ioremap_nocache
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index d2e5382821a4..14c9796e37ac 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -592,4 +592,13 @@ config CLKSRC_ST_LPC
Enable this option to use the Low Power controller timer
as clocksource.
+config ATCPIT100_TIMER
+ bool "ATCPIT100 timer driver"
+ depends on NDS32 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ default NDS32
+ help
+ This option enables support for the Andestech ATCPIT100 timers.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index d6dec4489d66..a79523b22e52 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -76,3 +76,4 @@ obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
+obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
diff --git a/drivers/clocksource/timer-atcpit100.c b/drivers/clocksource/timer-atcpit100.c
new file mode 100644
index 000000000000..5e23d7b4a722
--- /dev/null
+++ b/drivers/clocksource/timer-atcpit100.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+/*
+ * Andestech ATCPIT100 Timer Device Driver Implementation
+ * Rick Chen, Andes Technology Corporation <rick@andestech.com>
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/sched_clock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include "timer-of.h"
+#ifdef CONFIG_NDS32
+#include <asm/vdso_timer_info.h>
+#endif
+
+/*
+ * Definition of register offsets
+ */
+
+/* ID and Revision Register */
+#define ID_REV 0x0
+
+/* Configuration Register */
+#define CFG 0x10
+
+/* Interrupt Enable Register */
+#define INT_EN 0x14
+#define CH_INT_EN(c, i) ((1 << (i)) << (4 * (c)))
+#define CH0INT0EN 0x01
+
+/* Interrupt Status Register */
+#define INT_STA 0x18
+#define CH0INT0 0x01
+
+/* Channel Enable Register */
+#define CH_EN 0x1C
+#define CH0TMR0EN 0x1
+#define CH1TMR0EN 0x10
+
+/* Channel 0, 1 Control Register */
+#define CH0_CTL (0x20)
+#define CH1_CTL (0x20 + 0x10)
+
+/* Channel clock source, bit 3, 0: external clock, 1: APB clock */
+#define APB_CLK BIT(3)
+
+/* Channel mode, bits 0~2 */
+#define TMR_32 0x1
+#define TMR_16 0x2
+#define TMR_8 0x3
+
+/* Channel 0, 1 Reload Register */
+#define CH0_REL (0x24)
+#define CH1_REL (0x24 + 0x10)
+
+/* Channel 0, 1 Counter Register */
+#define CH0_CNT (0x28)
+#define CH1_CNT (0x28 + 0x10)
+
+#define TIMER_SYNC_TICKS 3
+
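+/*
+ * Channel 0 timer 0 drives the clock event device; channel 1 timer 0
+ * free-runs as the clocksource and sched_clock source.
+ */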
+static void atcpit100_ch1_tmr0_en(void __iomem *base)
+{
+ writel(~0, base + CH1_REL);
+ writel(APB_CLK|TMR_32, base + CH1_CTL);
+}
+
+static void atcpit100_ch0_tmr0_en(void __iomem *base)
+{
+ writel(APB_CLK|TMR_32, base + CH0_CTL);
+}
+
+static void atcpit100_clkevt_time_setup(void __iomem *base, unsigned long delay)
+{
+ writel(delay, base + CH0_CNT);
+ writel(delay, base + CH0_REL);
+}
+
+static void atcpit100_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val;
+
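+ /* Ack by writing the CH0 TMR0 pending bit back to INT_STA (write-one-to-clear). */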
+ val = readl(base + INT_STA);
+ writel(val | CH0INT0, base + INT_STA);
+}
+
+static void atcpit100_clocksource_start(void __iomem *base)
+{
+ u32 val;
+
+ val = readl(base + CH_EN);
+ writel(val | CH1TMR0EN, base + CH_EN);
+}
+
+static void atcpit100_clkevt_time_start(void __iomem *base)
+{
+ u32 val;
+
+ val = readl(base + CH_EN);
+ writel(val | CH0TMR0EN, base + CH_EN);
+}
+
+static void atcpit100_clkevt_time_stop(void __iomem *base)
+{
+ u32 val;
+
+ atcpit100_timer_clear_interrupt(base);
+ val = readl(base + CH_EN);
+ writel(val & ~CH0TMR0EN, base + CH_EN);
+}
+
+static int atcpit100_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *clkevt)
+{
+ u32 val;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ val = readl(timer_of_base(to) + CH_EN);
+ writel(val & ~CH0TMR0EN, timer_of_base(to) + CH_EN);
+ writel(evt, timer_of_base(to) + CH0_REL);
+ writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
+
+ return 0;
+}
+
+static int atcpit100_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+
+ atcpit100_clkevt_time_setup(timer_of_base(to), timer_of_period(to));
+ atcpit100_clkevt_time_start(timer_of_base(to));
+
+ return 0;
+}
+
+static int atcpit100_clkevt_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+
+ atcpit100_clkevt_time_stop(timer_of_base(to));
+
+ return 0;
+}
+
+static int atcpit100_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ u32 val;
+
+ writel(~0x0, timer_of_base(to) + CH0_REL);
+ val = readl(timer_of_base(to) + CH_EN);
+ writel(val | CH0TMR0EN, timer_of_base(to) + CH_EN);
+
+ return 0;
+}
+
+static irqreturn_t atcpit100_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct timer_of *to = to_timer_of(evt);
+
+ atcpit100_timer_clear_interrupt(timer_of_base(to));
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "atcpit100_tick",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = atcpit100_clkevt_shutdown,
+ .set_state_periodic = atcpit100_clkevt_set_periodic,
+ .set_state_oneshot = atcpit100_clkevt_set_oneshot,
+ .tick_resume = atcpit100_clkevt_shutdown,
+ .set_next_event = atcpit100_clkevt_next_event,
+ .cpumask = cpu_all_mask,
+ },
+
+ .of_irq = {
+ .handler = atcpit100_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+
+ /*
+ * FIXME: the driver currently only supports clocking from PCLK;
+ * EXTCLK is not supported yet.
+ */
+ .of_clk = {
+ .name = "PCLK",
+ }
+};
+
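+/* CH1 counts down from ~0; invert so sched_clock sees an up-counter. */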
+static u64 notrace atcpit100_timer_sched_read(void)
+{
+ return ~readl(timer_of_base(&to) + CH1_CNT);
+}
+
+#ifdef CONFIG_NDS32
+static void fill_vdso_need_info(struct device_node *node)
+{
+ struct resource timer_res;
+ of_address_to_resource(node, 0, &timer_res);
+ timer_info.mapping_base = (unsigned long)timer_res.start;
+ timer_info.cycle_count_down = true;
+ timer_info.cycle_count_reg_offset = CH1_CNT;
+}
+#endif
+
+static int __init atcpit100_timer_init(struct device_node *node)
+{
+ int ret;
+ u32 val;
+ void __iomem *base;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ return ret;
+
+ base = timer_of_base(&to);
+
+ sched_clock_register(atcpit100_timer_sched_read, 32,
+ timer_of_rate(&to));
+
+ ret = clocksource_mmio_init(base + CH1_CNT,
+ node->name, timer_of_rate(&to), 300, 32,
+ clocksource_mmio_readl_down);
+
+ if (ret) {
+ pr_err("Failed to register clocksource\n");
+ return ret;
+ }
+
+ /* clear channel 0 timer0 interrupt */
+ atcpit100_timer_clear_interrupt(base);
+
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+ atcpit100_ch0_tmr0_en(base);
+ atcpit100_ch1_tmr0_en(base);
+ atcpit100_clocksource_start(base);
+ atcpit100_clkevt_time_start(base);
+
+ /* Enable channel 0 timer0 interrupt */
+ val = readl(base + INT_EN);
+ writel(val | CH0INT0EN, base + INT_EN);
+
+#ifdef CONFIG_NDS32
+ fill_vdso_need_info(node);
+#endif
+
+ return ret;
+}
+
+TIMER_OF_DECLARE(atcpit100, "andestech,atcpit100", atcpit100_timer_init);
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d27e3e3619e0..de7cf483e1b7 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
+obj-$(CONFIG_NDS32) += irq-ativic32.o
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
new file mode 100644
index 000000000000..f69a8588521c
--- /dev/null
+++ b/drivers/irqchip/irq-ativic32.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <nds32_intrinsic.h>
+
+static void ativic32_ack_irq(struct irq_data *data)
+{
+ __nds32__mtsr_dsb(BIT(data->hwirq), NDS32_SR_INT_PEND2);
+}
+
+static void ativic32_mask_irq(struct irq_data *data)
+{
+ unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
+ __nds32__mtsr_dsb(int_mask2 & (~(BIT(data->hwirq))), NDS32_SR_INT_MASK2);
+}
+
+static void ativic32_unmask_irq(struct irq_data *data)
+{
+ unsigned long int_mask2 = __nds32__mfsr(NDS32_SR_INT_MASK2);
+ __nds32__mtsr_dsb(int_mask2 | (BIT(data->hwirq)), NDS32_SR_INT_MASK2);
+}
+
+static struct irq_chip ativic32_chip = {
+ .name = "ativic32",
+ .irq_ack = ativic32_ack_irq,
+ .irq_mask = ativic32_mask_irq,
+ .irq_unmask = ativic32_unmask_irq,
+};
+
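+/* Number of interrupt inputs for each encoding of the IVB.NIVIC field. */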
+static unsigned int __initdata nivic_map[6] = { 6, 2, 10, 16, 24, 32 };
+
+static struct irq_domain *root_domain;
+static int ativic32_irq_domain_map(struct irq_domain *id, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ unsigned long int_trigger_type;
+ u32 type;
+ struct irq_data *irq_data;
+ int_trigger_type = __nds32__mfsr(NDS32_SR_INT_TRIGGER);
+ irq_data = irq_get_irq_data(virq);
+ if (!irq_data)
+ return -EINVAL;
+
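+ /*
+ * The INT_TRIGGER system register reports whether each input is edge-
+ * or level-triggered; pick the matching flow handler and trigger type.
+ */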
+ if (int_trigger_type & (BIT(hw))) {
+ irq_set_chip_and_handler(virq, &ativic32_chip, handle_edge_irq);
+ type = IRQ_TYPE_EDGE_RISING;
+ } else {
+ irq_set_chip_and_handler(virq, &ativic32_chip, handle_level_irq);
+ type = IRQ_TYPE_LEVEL_HIGH;
+ }
+
+ irqd_set_trigger_type(irq_data, type);
+ return 0;
+}
+
+static const struct irq_domain_ops ativic32_ops = {
+ .map = ativic32_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell
+};
+
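+/*
+ * Derive the active hardware IRQ number from the vector field of the
+ * ITYPE system register.
+ */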
+static irq_hw_number_t get_intr_src(void)
+{
+ return ((__nds32__mfsr(NDS32_SR_ITYPE) & ITYPE_mskVECTOR) >> ITYPE_offVECTOR)
+ - NDS32_VECTOR_offINTERRUPT;
+}
+
+asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+{
+ irq_hw_number_t hwirq = get_intr_src();
+ handle_domain_irq(root_domain, hwirq, regs);
+}
+
+int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
+{
+ unsigned long int_vec_base, nivic, nr_ints;
+
+ if (WARN(parent, "non-root ativic32 is not supported"))
+ return -EINVAL;
+
+ int_vec_base = __nds32__mfsr(NDS32_SR_IVB);
+
+ if (((int_vec_base & IVB_mskIVIC_VER) >> IVB_offIVIC_VER) == 0)
+ panic("Unable to use atcivic32 for this cpu.\n");
+
+ nivic = (int_vec_base & IVB_mskNIVIC) >> IVB_offNIVIC;
+ if (nivic >= ARRAY_SIZE(nivic_map))
+ panic("The number of input for ativic32 is not supported.\n");
+
+ nr_ints = nivic_map[nivic];
+
+ root_domain = irq_domain_add_linear(node, nr_ints,
+ &ativic32_ops, NULL);
+
+ if (!root_domain)
+ panic("%s: unable to create IRQ domain\n", node->full_name);
+
+ return 0;
+}
+IRQCHIP_DECLARE(ativic32, "andestech,ativic32", ativic32_init_irq);
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 040c7f163325..0fb8df656677 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FARADAY
bool "Faraday devices"
default y
- depends on ARM
+ depends on ARM || NDS32 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,7 +18,8 @@ if NET_VENDOR_FARADAY
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM
+ depends on ARM || NDS32 || COMPILE_TEST
+ depends on !64BIT || BROKEN
select MII
---help---
This driver supports the FTMAC100 10/100 Ethernet controller
@@ -27,7 +28,8 @@ config FTMAC100
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM
+ depends on ARM || NDS32 || COMPILE_TEST
+ depends on !64BIT || BROKEN
select PHYLIB
---help---
This driver supports the FTGMAC100 Gigabit Ethernet controller
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 7f1f1fbcef9e..27bb893cf6b2 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@ config VGA_CONSOLE
depends on !4xx && !PPC_8xx && !SPARC && !M68K && !PARISC && !FRV && \
!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
- !ARM64 && !ARC && !MICROBLAZE && !OPENRISC
+ !ARM64 && !ARC && !MICROBLAZE && !OPENRISC && !NDS32
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b4531e3b2120..7c6a39e64749 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -852,7 +852,16 @@ static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
}
#endif
+#ifndef iounmap
+#define iounmap iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+}
+#endif
+#endif /* CONFIG_MMU */
#ifndef ioremap_nocache
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
@@ -884,15 +893,6 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
}
#endif
-#ifndef iounmap
-#define iounmap iounmap
-
-static inline void iounmap(void __iomem *addr)
-{
-}
-#endif
-#endif /* CONFIG_MMU */
-
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map