path: root/arch/arm64/lib/copy_to_user.S
author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-04 14:47:13 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-04 14:47:13 -0800
commit		2dc10ad81fc017837037e60439662e1b16bdffb9 (patch)
tree		fc2f77874339b2f79499e3b34dc5ecb496b68dfc /arch/arm64/lib/copy_to_user.S
parent		e627078a0cbdc0c391efeb5a2c4eb287328fd633 (diff)
parent		f8f8bdc48851da979c6e0e4808b6031122e4af47 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - "genirq: Introduce generic irq migration for cpu hotunplugged" patch
   merged from tip/irq/for-arm to allow the arm64-specific part to be
   upstreamed via the arm64 tree

 - CPU feature detection reworked to cope with heterogeneous systems
   where CPUs may not have exactly the same features. The features
   reported by the kernel via internal data structures or ELF_HWCAP are
   delayed until all the CPUs are up (and before user space starts)

 - Support for 16KB pages, with the additional bonus of a 36-bit VA
   space, though the latter only depending on EXPERT

 - Implement native {relaxed, acquire, release} atomics for arm64

 - New ASID allocation algorithm which avoids IPI on roll-over, together
   with TLB invalidation optimisations (using local vs global where
   feasible)

 - KASan support for arm64

 - EFI_STUB clean-up and isolation for the kernel proper (required by
   KASan)

 - copy_{to,from,in}_user optimisations (sharing the memcpy template)

 - perf: moving arm64 to the arm32/64 shared PMU framework

 - L1_CACHE_BYTES increased to 128 to accommodate Cavium hardware

 - Support for the contiguous PTE hint on kernel mapping (16 consecutive
   entries may be able to use a single TLB entry)

 - Generic CONFIG_HZ now used on arm64

 - defconfig updates

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (91 commits)
  arm64/efi: fix libstub build under CONFIG_MODVERSIONS
  ARM64: Enable multi-core scheduler support by default
  arm64/efi: move arm64 specific stub C code to libstub
  arm64: page-align sections for DEBUG_RODATA
  arm64: Fix build with CONFIG_ZONE_DMA=n
  arm64: Fix compat register mappings
  arm64: Increase the max granular size
  arm64: remove bogus TASK_SIZE_64 check
  arm64: make Timer Interrupt Frequency selectable
  arm64/mm: use PAGE_ALIGNED instead of IS_ALIGNED
  arm64: cachetype: fix definitions of ICACHEF_* flags
  arm64: cpufeature: declare enable_cpu_capabilities as static
  genirq: Make the cpuhotplug migration code less noisy
  arm64: Constify hwcap name string arrays
  arm64/kvm: Make use of the system wide safe values
  arm64/debug: Make use of the system wide safe value
  arm64: Move FP/ASIMD hwcap handling to common code
  arm64/HWCAP: Use system wide safe values
  arm64/capabilities: Make use of system wide safe value
  arm64: Delay cpu feature capability checks
  ...
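One practical consequence of the reworked feature detection and the "system wide safe values" patches listed above is that ELF_HWCAP is populated only once all CPUs are up, and a bit is set only if every CPU has the feature. As a rough illustration of how those bits are consumed, here is a small userspace sketch (assuming an arm64 system and toolchain; which HWCAP_* names are available depends on the installed kernel headers):

/* Print a few arm64 hwcaps as userspace sees them via the aux vector. */
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>		/* arm64 HWCAP_* bit definitions */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* With system-wide safe values, a bit is set only if every CPU has the feature. */
	printf("asimd: %s\n", (hwcap & HWCAP_ASIMD) ? "yes" : "no");
	printf("crc32: %s\n", (hwcap & HWCAP_CRC32) ? "yes" : "no");
	printf("aes:   %s\n", (hwcap & HWCAP_AES)   ? "yes" : "no");
	return 0;
}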
Diffstat (limited to 'arch/arm64/lib/copy_to_user.S')
-rw-r--r--	arch/arm64/lib/copy_to_user.S	67
1 file changed, 38 insertions(+), 29 deletions(-)
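The diff below is the copy_{to,from,in}_user item from the merge message: __copy_to_user stops carrying its own copy loop and instead defines small per-access macros (ldrb1/strb1, ldp1/stp1, ...) before pulling the common loop in from copy_template.S, so memcpy and the three uaccess copies share one body and only the user-facing accessors get USER() fault annotations. As a loose illustration of that split (plain C with made-up names, not the real template), the shared body can be thought of as a routine parameterised by its load/store helpers, returning the bytes it failed to copy:

/* Illustrative sketch of the shared-copy-template idea; not kernel code. */
#include <stddef.h>

typedef int (*load_fn)(unsigned char *val, const unsigned char *src);
typedef int (*store_fn)(unsigned char val, unsigned char *dst);

/* "Plain" helpers for the memcpy-like case: they never fault in this model. */
static int load_plain(unsigned char *val, const unsigned char *src)
{
	*val = *src;
	return 0;
}

static int store_plain(unsigned char val, unsigned char *dst)
{
	*dst = val;
	return 0;
}

/* Common body: returns the number of bytes NOT copied, 0 on success. */
static size_t copy_body(unsigned char *dst, const unsigned char *src,
			size_t n, load_fn load, store_fn store)
{
	size_t done;

	for (done = 0; done < n; done++) {
		unsigned char c;

		if (load(&c, src + done) || store(c, dst + done))
			break;	/* treat a helper failure like a fault */
	}
	return n - done;
}

/* memcpy-like variant: both sides use the plain, non-faulting helpers. */
static size_t copy_plain(unsigned char *dst, const unsigned char *src, size_t n)
{
	return copy_body(dst, src, n, load_plain, store_plain);
}

In the real assembly the parameterisation is done with macros rather than function pointers, and a fault is handled by the exception table redirecting execution to the 9998 fixup rather than by a return code, but the division of labour is the same.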
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a257b47e2dc4..7512bbbc07ac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -18,6 +18,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
@@ -31,44 +32,52 @@
* Returns:
* x0 - bytes not copied
*/
+ .macro ldrb1 ptr, regB, val
+ ldrb \ptr, [\regB], \val
+ .endm
+
+ .macro strb1 ptr, regB, val
+ USER(9998f, strb \ptr, [\regB], \val)
+ .endm
+
+ .macro ldrh1 ptr, regB, val
+ ldrh \ptr, [\regB], \val
+ .endm
+
+ .macro strh1 ptr, regB, val
+ USER(9998f, strh \ptr, [\regB], \val)
+ .endm
+
+ .macro ldr1 ptr, regB, val
+ ldr \ptr, [\regB], \val
+ .endm
+
+ .macro str1 ptr, regB, val
+ USER(9998f, str \ptr, [\regB], \val)
+ .endm
+
+ .macro ldp1 ptr, regB, regC, val
+ ldp \ptr, \regB, [\regC], \val
+ .endm
+
+ .macro stp1 ptr, regB, regC, val
+ USER(9998f, stp \ptr, \regB, [\regC], \val)
+ .endm
+
+end .req x5
ENTRY(__copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
- add x5, x0, x2 // upper user buffer boundary
- subs x2, x2, #16
- b.mi 1f
-0:
- ldp x3, x4, [x1], #16
- subs x2, x2, #16
-USER(9f, stp x3, x4, [x0], #16)
- b.pl 0b
-1: adds x2, x2, #8
- b.mi 2f
- ldr x3, [x1], #8
- sub x2, x2, #8
-USER(9f, str x3, [x0], #8 )
-2: adds x2, x2, #4
- b.mi 3f
- ldr w3, [x1], #4
- sub x2, x2, #4
-USER(9f, str w3, [x0], #4 )
-3: adds x2, x2, #2
- b.mi 4f
- ldrh w3, [x1], #2
- sub x2, x2, #2
-USER(9f, strh w3, [x0], #2 )
-4: adds x2, x2, #1
- b.mi 5f
- ldrb w3, [x1]
-USER(9f, strb w3, [x0] )
-5: mov x0, #0
+ add end, x0, x2
+#include "copy_template.S"
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
+ mov x0, #0
ret
ENDPROC(__copy_to_user)
.section .fixup,"ax"
.align 2
-9: sub x0, x5, x0 // bytes not copied
+9998: sub x0, end, dst // bytes not copied
ret
.previous
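The new 9998 fixup computes end - dst, i.e. how many bytes were still outstanding when a store to user space faulted, and that count is exactly what callers of copy_to_user()/__copy_to_user() get back. A minimal caller-side sketch of that contract (kernel-style C; sample_info and sample_read_info are made-up names for illustration, copy_to_user() itself is the real interface):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical structure; only the return-value handling matters here. */
struct sample_info {
	u32 version;
	u32 flags;
};

static long sample_read_info(void __user *ubuf)
{
	struct sample_info info = { .version = 1, .flags = 0 };

	/*
	 * copy_to_user() returns the number of bytes it could NOT copy:
	 * 0 means everything reached user space, non-zero means the copy
	 * hit a fault and the caller should normally report -EFAULT.
	 */
	if (copy_to_user(ubuf, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}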