author    | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2016-11-22 19:02:07 +0000
committer | Nemanja Ivanovic <nemanja.i.ibm@gmail.com> | 2016-11-22 19:02:07 +0000
commit    | b8e30d6db638e5f1bb14fc76cd68262eb7b16e24
tree      | 981074d2ac5ce9819c2e10d0548039b1219e93ae
parent    | d1aed9a9e6f4f8dd38d31148272b76db24941071
[PowerPC] Emit VMX loads/stores for aligned ops to avoid adding swaps on LE
This patch corresponds to review:
https://reviews.llvm.org/D26861
It also fixes PR30730.
Committing on behalf of Lei Huang.
llvm-svn: 287679
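As a rough sketch of the lowering this patch changes (illustrative only, not taken from the commit; the function name @sum4 and the exact instruction choices are assumptions based on the commit message and the test comment below): a 16-byte-aligned <4 x i32> access on powerpc64le previously lowered to lxvd2x plus xxswapd (and xxswapd plus stxvd2x on the store side), while with this patch it can be emitted as a plain VMX lvx/stvx that needs no swap.

; Hypothetical input, compiled with something like:
;   llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs
define <4 x i32> @sum4(<4 x i32>* %p, <4 x i32>* %q) {
entry:
  %a = load <4 x i32>, <4 x i32>* %p, align 16  ; before: lxvd2x + xxswapd, after: lvx
  %b = load <4 x i32>, <4 x i32>* %q, align 16  ; before: lxvd2x + xxswapd, after: lvx
  %r = add nsw <4 x i32> %a, %b
  ret <4 x i32> %r
}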
Diffstat (limited to 'llvm/test/CodeGen/PowerPC/swaps-le-1.ll')
-rw-r--r-- | llvm/test/CodeGen/PowerPC/swaps-le-1.ll | 46
1 file changed, 26 insertions, 20 deletions
diff --git a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
index cb83bf262b4..29c94f89543 100644
--- a/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
+++ b/llvm/test/CodeGen/PowerPC/swaps-le-1.ll
@@ -13,6 +13,12 @@
 ; RUN: -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s \
 ; RUN: | FileCheck -check-prefix=NOOPTSWAP %s
 
+; LH: 2016-11-17
+; Updated align attribute from 16 to 8 to keep swap instructions tests.
+; Changes have been made on little-endian to use lvx and stvx
+; instructions instead of lxvd2x/xxswapd and xxswapd/stxvd2x for
+; aligned vectors with elements up to 4 bytes
+
 ; This test was generated from the following source:
 ;
 ; #define N 4096
@@ -29,10 +35,10 @@
 ; }
 ; }
 
-@cb = common global [4096 x i32] zeroinitializer, align 16
-@cc = common global [4096 x i32] zeroinitializer, align 16
-@cd = common global [4096 x i32] zeroinitializer, align 16
-@ca = common global [4096 x i32] zeroinitializer, align 16
+@cb = common global [4096 x i32] zeroinitializer, align 8
+@cc = common global [4096 x i32] zeroinitializer, align 8
+@cd = common global [4096 x i32] zeroinitializer, align 8
+@ca = common global [4096 x i32] zeroinitializer, align 8
 
 define void @foo() {
 entry:
@@ -42,63 +48,63 @@ vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
   %0 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index
   %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 16
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 8
   %2 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index
   %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 16
+  %wide.load13 = load <4 x i32>, <4 x i32>* %3, align 8
   %4 = add nsw <4 x i32> %wide.load13, %wide.load
   %5 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index
   %6 = bitcast i32* %5 to <4 x i32>*
-  %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 16
+  %wide.load14 = load <4 x i32>, <4 x i32>* %6, align 8
   %7 = mul nsw <4 x i32> %4, %wide.load14
   %8 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index
   %9 = bitcast i32* %8 to <4 x i32>*
-  store <4 x i32> %7, <4 x i32>* %9, align 16
+  store <4 x i32> %7, <4 x i32>* %9, align 8
   %index.next = add nuw nsw i64 %index, 4
   %10 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next
   %11 = bitcast i32* %10 to <4 x i32>*
-  %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 16
+  %wide.load.1 = load <4 x i32>, <4 x i32>* %11, align 8
   %12 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next
   %13 = bitcast i32* %12 to <4 x i32>*
-  %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 16
+  %wide.load13.1 = load <4 x i32>, <4 x i32>* %13, align 8
   %14 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
   %15 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next
   %16 = bitcast i32* %15 to <4 x i32>*
-  %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 16
+  %wide.load14.1 = load <4 x i32>, <4 x i32>* %16, align 8
   %17 = mul nsw <4 x i32> %14, %wide.load14.1
   %18 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next
   %19 = bitcast i32* %18 to <4 x i32>*
-  store <4 x i32> %17, <4 x i32>* %19, align 16
+  store <4 x i32> %17, <4 x i32>* %19, align 8
   %index.next.1 = add nuw nsw i64 %index.next, 4
   %20 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.1
   %21 = bitcast i32* %20 to <4 x i32>*
-  %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 16
+  %wide.load.2 = load <4 x i32>, <4 x i32>* %21, align 8
   %22 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.1
   %23 = bitcast i32* %22 to <4 x i32>*
-  %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 16
+  %wide.load13.2 = load <4 x i32>, <4 x i32>* %23, align 8
   %24 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
   %25 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.1
   %26 = bitcast i32* %25 to <4 x i32>*
-  %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 16
+  %wide.load14.2 = load <4 x i32>, <4 x i32>* %26, align 8
   %27 = mul nsw <4 x i32> %24, %wide.load14.2
   %28 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.1
   %29 = bitcast i32* %28 to <4 x i32>*
-  store <4 x i32> %27, <4 x i32>* %29, align 16
+  store <4 x i32> %27, <4 x i32>* %29, align 8
   %index.next.2 = add nuw nsw i64 %index.next.1, 4
   %30 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cb, i64 0, i64 %index.next.2
   %31 = bitcast i32* %30 to <4 x i32>*
-  %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 16
+  %wide.load.3 = load <4 x i32>, <4 x i32>* %31, align 8
   %32 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cc, i64 0, i64 %index.next.2
   %33 = bitcast i32* %32 to <4 x i32>*
-  %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 16
+  %wide.load13.3 = load <4 x i32>, <4 x i32>* %33, align 8
   %34 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
   %35 = getelementptr inbounds [4096 x i32], [4096 x i32]* @cd, i64 0, i64 %index.next.2
   %36 = bitcast i32* %35 to <4 x i32>*
-  %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 16
+  %wide.load14.3 = load <4 x i32>, <4 x i32>* %36, align 8
   %37 = mul nsw <4 x i32> %34, %wide.load14.3
   %38 = getelementptr inbounds [4096 x i32], [4096 x i32]* @ca, i64 0, i64 %index.next.2
   %39 = bitcast i32* %38 to <4 x i32>*
-  store <4 x i32> %37, <4 x i32>* %39, align 16
+  store <4 x i32> %37, <4 x i32>* %39, align 8
   %index.next.3 = add nuw nsw i64 %index.next.2, 4
   %40 = icmp eq i64 %index.next.3, 4096
   br i1 %40, label %for.end, label %vector.body
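The comment added in the first hunk explains why the globals drop from align 16 to align 8: with 16-byte alignment these accesses would now select lvx/stvx, and the swap-removal checks this test exists for would no longer be exercised. A complementary check for the new lowering might look roughly like the following sketch (hypothetical, not part of this commit; the function name @aligned_copy and the CHECK lines are assumptions):

; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

define void @aligned_copy(<4 x i32>* %src, <4 x i32>* %dst) {
entry:
  %v = load <4 x i32>, <4 x i32>* %src, align 16
  store <4 x i32> %v, <4 x i32>* %dst, align 16
  ret void
}

; CHECK-LABEL: aligned_copy:
; CHECK-NOT: xxswapd
; CHECK: lvx
; CHECK: stvx
; CHECK-NOT: xxswapd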