Diffstat (limited to 'arch/arm/mm/alignment.c')
-rw-r--r-- | arch/arm/mm/alignment.c | 56
1 file changed, 41 insertions(+), 15 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index be7c638b648b..cfbcf8b95599 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/system.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
 	"signal+warn"
 };
 
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+	return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+	/*
+	 * ARMv6 and later CPUs can perform unaligned accesses for
+	 * most single load and store instructions up to word size.
+	 * LDM, STM, LDRD and STRD still need to be handled.
+	 *
+	 * Ignoring the alignment fault is not an option on these
+	 * CPUs since we spin re-faulting the instruction without
+	 * making any progress.
+	 */
+	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+		new_usermode |= UM_FIXUP;
+
+		if (warn)
+			printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+	}
+
+	return new_usermode;
+}
+
 static int alignment_proc_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
 		if (get_user(mode, buffer))
 			return -EFAULT;
 		if (mode >= '0' && mode <= '5')
-			ai_usermode = mode - '0';
+			ai_usermode = safe_usermode(mode - '0', true);
 	}
 	return count;
 }
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (ai_usermode & UM_FIXUP)
 		goto fixup;
 
-	if (ai_usermode & UM_SIGNAL)
-		force_sig(SIGBUS, current);
-	else {
+	if (ai_usermode & UM_SIGNAL) {
+		siginfo_t si;
+
+		si.si_signo = SIGBUS;
+		si.si_errno = 0;
+		si.si_code = BUS_ADRALN;
+		si.si_addr = (void __user *)addr;
+
+		force_sig_info(si.si_signo, &si, current);
+	} else {
 		/*
 		 * We're about to disable the alignment trap and return to
 		 * user space. But if an interrupt occurs before actually
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
 		return -ENOMEM;
 #endif
 
-	/*
-	 * ARMv6 and later CPUs can perform unaligned accesses for
-	 * most single load and store instructions up to word size.
-	 * LDM, STM, LDRD and STRD still need to be handled.
-	 *
-	 * Ignoring the alignment fault is not an option on these
-	 * CPUs since we spin re-faulting the instruction without
-	 * making any progress.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+	if (cpu_is_v6_unaligned()) {
 		cr_alignment &= ~CR_A;
 		cr_no_alignment &= ~CR_A;
 		set_cr(cr_alignment);
-		ai_usermode = UM_FIXUP;
+		ai_usermode = safe_usermode(ai_usermode, false);
 	}
 
 	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
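What the do_alignment() hunk changes for user space: instead of a bare force_sig(SIGBUS, current), the fault is now delivered with force_sig_info() carrying si_code = BUS_ADRALN and the faulting address in si_addr. The sketch below is not part of the patch; it is a minimal userspace program, written under the assumption that the alignment trap has been put in signal mode (a /proc/cpu/alignment value with the UM_SIGNAL bit set) on a CPU where the access actually traps, showing how an SA_SIGINFO handler can see that information.

/*
 * Hypothetical userspace demo (not from the patch): catch SIGBUS with
 * SA_SIGINFO and check for BUS_ADRALN. The faulting address is also
 * available in si->si_addr once the kernel uses force_sig_info().
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	/* Only async-signal-safe calls in here; report and bail out. */
	static const char msg[] = "SIGBUS: alignment fault (BUS_ADRALN)\n";

	(void)sig;
	(void)ctx;
	if (si->si_code == BUS_ADRALN)
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;
	char buf[8] = { 0 };
	volatile unsigned int *p = (unsigned int *)(buf + 1); /* misaligned */

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGBUS, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	/*
	 * In signal mode this misaligned load raises SIGBUS; in fixup
	 * mode the kernel emulates it and the program just prints 0.
	 */
	printf("read %u\n", *p);
	return 0;
}

Whether the load traps at all depends on the CPU and on the mode configured through /proc/cpu/alignment; the point of the hunk is only that, when it does trap in signal mode, the siginfo now identifies the cause and address rather than arriving as an anonymous SIGBUS.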