author     Andrew Kaylor <andrew.kaylor@intel.com>    2016-07-29 18:23:18 +0000
committer  Andrew Kaylor <andrew.kaylor@intel.com>    2016-07-29 18:23:18 +0000
commit     b99d1cc7ed2bf3b57e7ee9528d4e0f2d30e65e5c
tree       0a5b68e0ab47429022e2826a685490cad8812601 /llvm/test/CodeGen/X86/mempcpy.ll
parent     0d56e05a12a98b23f5ca7df05ff53f256d7c5dc7
Recommitting r275284: add support to inline __builtin_mempcpy
Patch by Sunita Marathe
Third try, now following fixes to MSan to handle mempcpy in such a way that this commit won't break the MSan buildbots. (Thanks, Evgenii!)
llvm-svn: 277189
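
For reference, the semantic contract this inlining relies on can be sketched in C. This is an illustrative model only, not code from the patch; the helper name mempcpy_as_memcpy is made up here:

    #include <string.h>

    /* mempcpy(dst, src, n) copies n bytes exactly like memcpy, but returns
     * dst + n (one past the last byte written) instead of dst. That is why
     * the compiler can lower it to a memcpy call plus a pointer adjustment. */
    void *mempcpy_as_memcpy(void *dst, const void *src, size_t n) {
      memcpy(dst, src, n);       /* same copy as mempcpy performs */
      return (char *)dst + n;    /* mempcpy's return value: dst advanced by n */
    }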
Diffstat (limited to 'llvm/test/CodeGen/X86/mempcpy.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/mempcpy.ll | 28 |
1 files changed, 28 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/mempcpy.ll b/llvm/test/CodeGen/X86/mempcpy.ll
new file mode 100644
index 00000000000..1c737b64402
--- /dev/null
+++ b/llvm/test/CodeGen/X86/mempcpy.ll
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux -O2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-linux -O2 | FileCheck %s
+
+; This test checks that:
+; (1) mempcpy is lowered as memcpy, and
+; (2) its return value is DST+N i.e. the dst pointer adjusted by the copy size.
+; To keep the testing of (2) independent of the exact instructions used to
+; adjust the dst pointer, DST+N is explicitly computed and stored to a global
+; variable G before the mempcpy call. This instance of DST+N causes the repeat
+; DST+N done in the context of the return value of mempcpy to be redundant, and
+; the first instance to be reused as the return value. This allows the check for
+; (2) to be expressed as verifying that the MOV to store DST+N to G and
+; the MOV to copy DST+N to %rax use the same source register.
+@G = common global i8* null, align 8
+
+; CHECK-LABEL: RET_MEMPCPY:
+; CHECK: mov{{.*}} [[REG:%[er][a-z0-9]+]], {{.*}}G
+; CHECK: call{{.*}} {{.*}}memcpy
+; CHECK: mov{{.*}} [[REG]], %{{[er]}}ax
+;
+define i8* @RET_MEMPCPY(i8* %DST, i8* %SRC, i64 %N) {
+  %add.ptr = getelementptr inbounds i8, i8* %DST, i64 %N
+  store i8* %add.ptr, i8** @G, align 8
+  %call = tail call i8* @mempcpy(i8* %DST, i8* %SRC, i64 %N)
+  ret i8* %call
+}
+
+declare i8* @mempcpy(i8*, i8*, i64)
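
A rough C analogue of the IR test above, for illustration only: this source is not part of the commit, and the names G and RET_MEMPCPY simply mirror the test. It shows why storing DST+N to the global before the call makes the return-value check register-independent: the addition is computed once, so the same register can hold both the stored value and the returned value.

    #include <stddef.h>

    /* GNU extension; declared here so the sketch is self-contained. */
    extern void *mempcpy(void *dst, const void *src, size_t n);

    void *G;

    void *RET_MEMPCPY(char *DST, const char *SRC, size_t N) {
      G = DST + N;                    /* DST+N computed and stored first */
      return mempcpy(DST, SRC, N);    /* returns DST+N again; the earlier value can be reused */
    }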