From 0becb088501886f37ade38762c8eaaf4263572cc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:53 +0100 Subject: Thumb-2: Add macros for the unified assembler syntax This patch adds various C and assembler macros that help with using the unified assembler syntax for compiling files to either ARM or Thumb-2 modes. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/unified.h | 126 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 arch/arm/include/asm/unified.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h new file mode 100644 index 000000000000..073e85b9b961 --- /dev/null +++ b/arch/arm/include/asm/unified.h @@ -0,0 +1,126 @@ +/* + * include/asm-arm/unified.h - Unified Assembler Syntax helper macros + * + * Copyright (C) 2008 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_UNIFIED_H +#define __ASM_UNIFIED_H + +#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED) + .syntax unified +#endif + +#ifdef CONFIG_THUMB2_KERNEL + +#if __GNUC__ < 4 +#error Thumb-2 kernel requires gcc >= 4 +#endif + +/* The CPSR bit describing the instruction set (Thumb) */ +#define PSR_ISETSTATE PSR_T_BIT + +#define ARM(x...) +#define THUMB(x...) x +#define W(instr) instr.w +#define BSYM(sym) sym + 1 + +#else /* !CONFIG_THUMB2_KERNEL */ + +/* The CPSR bit describing the instruction set (ARM) */ +#define PSR_ISETSTATE 0 + +#define ARM(x...) x +#define THUMB(x...) 
+#define W(instr) instr +#define BSYM(sym) sym + +#endif /* CONFIG_THUMB2_KERNEL */ + +#ifndef CONFIG_ARM_ASM_UNIFIED + +/* + * If the unified assembly syntax isn't used (in ARM mode), these + * macros expand to an empty string + */ +#ifdef __ASSEMBLY__ + .macro it, cond + .endm + .macro itt, cond + .endm + .macro ite, cond + .endm + .macro ittt, cond + .endm + .macro itte, cond + .endm + .macro itet, cond + .endm + .macro itee, cond + .endm + .macro itttt, cond + .endm + .macro ittte, cond + .endm + .macro ittet, cond + .endm + .macro ittee, cond + .endm + .macro itett, cond + .endm + .macro itete, cond + .endm + .macro iteet, cond + .endm + .macro iteee, cond + .endm +#else /* !__ASSEMBLY__ */ +__asm__( +" .macro it, cond\n" +" .endm\n" +" .macro itt, cond\n" +" .endm\n" +" .macro ite, cond\n" +" .endm\n" +" .macro ittt, cond\n" +" .endm\n" +" .macro itte, cond\n" +" .endm\n" +" .macro itet, cond\n" +" .endm\n" +" .macro itee, cond\n" +" .endm\n" +" .macro itttt, cond\n" +" .endm\n" +" .macro ittte, cond\n" +" .endm\n" +" .macro ittet, cond\n" +" .endm\n" +" .macro ittee, cond\n" +" .endm\n" +" .macro itett, cond\n" +" .endm\n" +" .macro itete, cond\n" +" .endm\n" +" .macro iteet, cond\n" +" .endm\n" +" .macro iteee, cond\n" +" .endm\n"); +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_ASM_UNIFIED */ + +#endif /* !__ASM_UNIFIED_H */ -- cgit v1.2.1 From b86040a59feb255a8193173caa4d5199464433d5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:54 +0100 Subject: Thumb-2: Implementation of the unified start-up and exceptions code This patch implements the ARM/Thumb-2 unified kernel start-up and exception handling code. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/assembler.h | 11 +++++++++++ arch/arm/include/asm/futex.h | 1 + 2 files changed, 12 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 15f8a092b700..1acd1da36d00 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -127,3 +127,14 @@ #endif #endif .endm + +#ifdef CONFIG_THUMB2_KERNEL + .macro setmode, mode, reg + mov \reg, #\mode + msr cpsr_c, \reg + .endm +#else + .macro setmode, mode, reg + msr cpsr_c, #\mode + .endm +#endif diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 9ee743b95de8..bfcc15929a7f 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -99,6 +99,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrt %0, [%3]\n" " teq %0, %1\n" + " it eq @ explicit IT needed for the 2b label\n" "2: streqt %2, [%3]\n" "3:\n" " .section __ex_table,\"a\"\n" -- cgit v1.2.1 From 8b592783a2e8b7721a99730bd549aab5208f36af Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:57 +0100 Subject: Thumb-2: Implement the unified arch/arm/lib functions This patch adds the ARM/Thumb-2 unified support for the arch/arm/lib/* files. 
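As a purely illustrative aside (not code from this series), the basic pattern these conversions rely on can be shown in C inline assembly: an explicit IT instruction guards the conditional instruction so the source assembles as Thumb-2, while the empty it* assembler macros from <asm/unified.h> (added earlier in the series) turn the same text into a no-op when the unified syntax is not in use. The helper below is hypothetical:

/*
 * Hypothetical example: store 'new' through 'ptr' only if *ptr == old.
 * The "it eq" is a real instruction in a Thumb-2 build; with the classic
 * ARM syntax it hits the empty macros defined in <asm/unified.h>.
 */
#include <asm/unified.h>

static inline void store_if_equal(int old, int new, int *ptr)
{
	int tmp;

	__asm__ __volatile__(
	"	ldr	%0, [%3]\n"
	"	teq	%0, %1\n"
	"	it	eq\n"
	"	streq	%2, [%3]\n"
	: "=&r" (tmp)
	: "r" (old), "r" (new), "r" (ptr)
	: "cc", "memory");
}

This is the same reason the futex code above needed an explicit "it eq" in front of its conditional user store.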
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/assembler.h | 73 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/uaccess.h | 7 ++-- 2 files changed, 78 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 1acd1da36d00..2b60c7d05770 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -138,3 +138,76 @@ msr cpsr_c, #\mode .endm #endif + +/* + * STRT/LDRT access macros with ARM and Thumb-2 variants + */ +#ifdef CONFIG_THUMB2_KERNEL + + .macro usraccoff, instr, reg, ptr, inc, off, cond, abort +9999: + .if \inc == 1 + \instr\cond\()bt \reg, [\ptr, #\off] + .elseif \inc == 4 + \instr\cond\()t \reg, [\ptr, #\off] + .else + .error "Unsupported inc macro argument" + .endif + + .section __ex_table,"a" + .align 3 + .long 9999b, \abort + .previous + .endm + + .macro usracc, instr, reg, ptr, inc, cond, rept, abort + @ explicit IT instruction needed because of the label + @ introduced by the USER macro + .ifnc \cond,al + .if \rept == 1 + itt \cond + .elseif \rept == 2 + ittt \cond + .else + .error "Unsupported rept macro argument" + .endif + .endif + + @ Slightly optimised to avoid incrementing the pointer twice + usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort + .if \rept == 2 + usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort + .endif + + add\cond \ptr, #\rept * \inc + .endm + +#else /* !CONFIG_THUMB2_KERNEL */ + + .macro usracc, instr, reg, ptr, inc, cond, rept, abort + .rept \rept +9999: + .if \inc == 1 + \instr\cond\()bt \reg, [\ptr], #\inc + .elseif \inc == 4 + \instr\cond\()t \reg, [\ptr], #\inc + .else + .error "Unsupported inc macro argument" + .endif + + .section __ex_table,"a" + .align 3 + .long 9999b, \abort + .previous + .endr + .endm + +#endif /* CONFIG_THUMB2_KERNEL */ + + .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f + usracc str, \reg, \ptr, \inc, \cond, \rept, \abort + .endm + + .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f + usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort + .endm diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 0da9bc9b3b1d..1d6bd40a4322 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -17,6 +17,7 @@ #include #include #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -365,8 +366,10 @@ do { \ #define __put_user_asm_dword(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strt " __reg_oper1 ", [%1], #4\n" \ - "2: strt " __reg_oper0 ", [%1]\n" \ + ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \ + ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \ + THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ + THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ -- cgit v1.2.1 From adca6dc23bc620ea95392659625200a252b97be3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:32:59 +0100 Subject: Thumb-2: Add support for loadable modules Modules compiled to Thumb-2 have two additional relocations needing to be resolved at load time, R_ARM_THM_CALL and R_ARM_THM_JUMP24, for BL and B.W instructions. The maximum Thumb-2 addressing range is +/-2^24 (+/-16MB) therefore the MODULES_VADDR macro in asm/memory.h is set to (MODULES_END - 8MB) for the Thumb-2 compiled kernel. 
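For illustration only (the helper below is hypothetical, not code from this patch), the range check a relocation handler has to apply for R_ARM_THM_CALL and R_ARM_THM_JUMP24 amounts to the following; shrinking the module area to 8MB below PAGE_OFFSET helps keep module-to-kernel branches inside that window:

/*
 * Illustrative sketch: a Thumb-2 BL/B.W encodes a signed offset of roughly
 * +/-2^24 bytes, so a relocation whose target lies outside that window
 * cannot be fixed up at module load time.
 */
static int thumb2_branch_in_range(unsigned long loc, unsigned long sym)
{
	long offset = (long)sym - (long)loc;

	return offset > -(1L << 24) && offset < (1L << 24);
}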
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/elf.h | 3 +++ arch/arm/include/asm/memory.h | 6 ++++++ 2 files changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index c207504de84d..c3b911ee9151 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -55,6 +55,9 @@ typedef struct user_fp elf_fpregset_t; #define R_ARM_MOVW_ABS_NC 43 #define R_ARM_MOVT_ABS 44 +#define R_ARM_THM_CALL 10 +#define R_ARM_THM_JUMP24 30 + /* * These are used to set parameters in the core dumps. */ diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 85763db87449..376be1a62866 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -44,7 +44,13 @@ * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. */ +#ifndef CONFIG_THUMB2_KERNEL #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) +#else +/* smaller range for Thumb-2 symbols relocation (2^24)*/ +#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) +#endif + #if TASK_SIZE > MODULES_VADDR #error Top of user space clashes with start of module space #endif -- cgit v1.2.1 From 9a45f026bb3ba720765d46f62f5c464d3317a8ab Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:34:56 +0100 Subject: nommu: Remove the context.id from asm-offsets.c when !MMU There is no MMU context switching on MMU-less systems. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/mmu_context.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 263fed05ea33..bcdb9291ef0c 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -62,8 +62,10 @@ static inline void check_context(struct mm_struct *mm) static inline void check_context(struct mm_struct *mm) { +#ifdef CONFIG_MMU if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) __check_kvm_seq(mm); +#endif } #define init_new_context(tsk,mm) 0 -- cgit v1.2.1 From 68b7f7153fa58df710924fbb79722717d2d16094 Mon Sep 17 00:00:00 2001 From: Paul Brook Date: Fri, 24 Jul 2009 12:34:58 +0100 Subject: nommu: ptrace support The patch below adds ARM ptrace functions to get the process load address. This is required for useful userspace debugging on mmuless systems. These values are obtained by reading magic offsets with PTRACE_PEEKUSR, as on other nommu targets. I picked arbitrary large values for the offsets. Signed-off-by: Paul Brook --- arch/arm/include/asm/ptrace.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 67b833c9b6b9..bbecccda76d0 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -82,6 +82,14 @@ #define PSR_ENDSTATE 0 #endif +/* + * These are 'magic' values for PTRACE_PEEKUSR that return info about where a + * process is located in memory. + */ +#define PT_TEXT_ADDR 0x10000 +#define PT_DATA_ADDR 0x10004 +#define PT_TEXT_END_ADDR 0x10008 + #ifndef __ASSEMBLY__ /* -- cgit v1.2.1 From 2732f4b6f11689fa08e2db5689fc652d608936b5 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 24 Jul 2009 12:35:01 +0100 Subject: nommu: Remove the memory_start/end variables from ARM page-nommu.h These variables do not seem to be used anywhere in the kernel. 
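Relating to the nommu ptrace support above: a debugger reads these magic offsets with PTRACE_PEEKUSR once the tracee is stopped. A hypothetical userspace sketch (glibc spells the request PTRACE_PEEKUSER):

/*
 * Illustrative only: query where a stopped nommu tracee was loaded.
 * The PT_* values mirror the new asm/ptrace.h definitions.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

#define PT_TEXT_ADDR		0x10000
#define PT_DATA_ADDR		0x10004
#define PT_TEXT_END_ADDR	0x10008

static long peek_user(pid_t pid, long off)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid, (void *)off, (void *)0);
	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");
	return val;
}

/* e.g.: long text_base = peek_user(child, PT_TEXT_ADDR); */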
Signed-off-by: Catalin Marinas
---
 arch/arm/include/asm/page-nommu.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 3574c0deb37f..d1b162a18dcb 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -43,7 +43,4 @@ typedef unsigned long pgprot_t;
 #define __pmd(x) (x)
 #define __pgprot(x) (x)
 
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
 #endif
-- cgit v1.2.1

From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 22 Jul 2009 15:44:28 +1000
Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture will
need to have the virtual address corresponding to PTE page when freeing it,
due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort-of, half of it actually) represented by a PTE page,
and which contain an "indirect" bit indicating that this TLB entry RPN points
to an array of PTEs from which the TLB can then create direct entries. Thus,
in order to invalidate those when PTE pages are deleted, we need the virtual
address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks too
much, the address is almost readily available in all call sites and almost
everybody implements these as macros, so we may as well add the argument
everywhere. I added it to the pmd and pud variants for consistency.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: David Howells [MN10300 & FRV]
Acked-by: Nick Piggin
Acked-by: Martin Schwidefsky [s390]
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/tlb.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 321c83e43a1e..f41a6f57cd12 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }
 
 #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm) do { } while (0)
-- cgit v1.2.1

From 181f817eaaca4c1f8a9c265d339d2b96de8b245d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?=
Date: Thu, 13 Aug 2009 20:38:16 +0200
Subject: [ARM] support tracing when using newer compilers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since gcc 4.4 the name and calling convention for function profiling on ARM
changed. With this patch both types are supported.

See http://sourceware.org/ml/libc-ports/2008-04/msg00009.html for some
details.
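Concretely (illustrative, based on the thread referenced above): a function compiled with -pg by gcc 4.4 or later starts by pushing lr and calling __gnu_mcount_nc, which pops it again, whereas older compilers emit a call to plain mcount with the old convention; the kernel therefore has to provide both entry points, and the ftrace.h change below declares the new one.

/*
 * Illustrative only: prologue emitted by gcc >= 4.4 with -pg
 * (per the libc-ports thread above):
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc		@ the callee pops lr again
 *
 * Older compilers call mcount instead, so both symbols must resolve.
 */
extern void mcount(void);
extern void __gnu_mcount_nc(void);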
Lightly-Tested-by: Anand Gadiyar Tested-by: Kevin Hilman Signed-off-by: Uwe Kleine-König --- arch/arm/include/asm/ftrace.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 39c8bc1a006a..0d4c478e01b6 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -7,6 +7,7 @@ #ifndef __ASSEMBLY__ extern void mcount(void); +extern void __gnu_mcount_nc(void); #endif #endif -- cgit v1.2.1 From 0d928b0b616d1c5c5fe76019a87cba171ca91633 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 13 Aug 2009 20:38:17 +0200 Subject: Complete irq tracing support for ARM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before this patch enabling and disabling irqs in assembler code and by the hardware wasn't tracked completly. I had to transpose two instructions in arch/arm/lib/bitops.h because restore_irqs doesn't preserve the flags with CONFIG_TRACE_IRQFLAGS=y Signed-off-by: Uwe Kleine-König Cc: Russell King Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Uwe Kleine-König --- arch/arm/include/asm/assembler.h | 49 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 15f8a092b700..44912cd5da13 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -74,23 +74,56 @@ * Enable and disable interrupts */ #if __LINUX_ARM_ARCH__ >= 6 - .macro disable_irq + .macro disable_irq_notrace cpsid i .endm - .macro enable_irq + .macro enable_irq_notrace cpsie i .endm #else - .macro disable_irq + .macro disable_irq_notrace msr cpsr_c, #PSR_I_BIT | SVC_MODE .endm - .macro enable_irq + .macro enable_irq_notrace msr cpsr_c, #SVC_MODE .endm #endif + .macro asm_trace_hardirqs_off +#if defined(CONFIG_TRACE_IRQFLAGS) + stmdb sp!, {r0-r3, ip, lr} + bl trace_hardirqs_off + ldmia sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro asm_trace_hardirqs_on_cond, cond +#if defined(CONFIG_TRACE_IRQFLAGS) + /* + * actually the registers should be pushed and pop'd conditionally, but + * after bl the flags are certainly clobbered + */ + stmdb sp!, {r0-r3, ip, lr} + bl\cond trace_hardirqs_on + ldmia sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro asm_trace_hardirqs_on + asm_trace_hardirqs_on_cond al + .endm + + .macro disable_irq + disable_irq_notrace + asm_trace_hardirqs_off + .endm + + .macro enable_irq + asm_trace_hardirqs_on + enable_irq_notrace + .endm /* * Save the current IRQ state and disable IRQs. Note that this macro * assumes FIQs are enabled, and that the processor is in SVC mode. @@ -104,10 +137,16 @@ * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags. */ - .macro restore_irqs, oldcpsr + .macro restore_irqs_notrace, oldcpsr msr cpsr_c, \oldcpsr .endm + .macro restore_irqs, oldcpsr + tst \oldcpsr, #PSR_I_BIT + asm_trace_hardirqs_on_cond eq + restore_irqs_notrace \oldcpsr + .endm + #define USER(x...) \ 9999: x; \ .section __ex_table,"a"; \ -- cgit v1.2.1
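The ordering used by the new disable_irq/enable_irq macros mirrors the generic C helpers: the tracer is always called while interrupts are still disabled. A simplified sketch of that pairing (the macro names here are hypothetical; raw_local_irq_*() and trace_hardirqs_*() are the existing kernel primitives):

/*
 * Simplified sketch, not taken from this patch: tell the irq-flags tracer
 * about the transition while IRQs are still off, exactly as the new
 * assembler macros do.
 */
#include <linux/irqflags.h>

#define irq_disable_traced()			\
	do {					\
		raw_local_irq_disable();	\
		trace_hardirqs_off();		\
	} while (0)

#define irq_enable_traced()			\
	do {					\
		trace_hardirqs_on();		\
		raw_local_irq_enable();		\
	} while (0)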