Diffstat (limited to 'llvm')
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-02.ll | 95
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-03.ll | 95
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-04.ll | 11
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-05.ll | 11
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-06.ll | 11
-rw-r--r--  llvm/test/CodeGen/SystemZ/bswap-07.ll | 12
6 files changed, 50 insertions, 185 deletions
diff --git a/llvm/test/CodeGen/SystemZ/bswap-02.ll b/llvm/test/CodeGen/SystemZ/bswap-02.ll
index 9c964569dfd..32f3f66ce8c 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-02.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-02.ll
@@ -86,40 +86,23 @@ define i32 @f7(i64 %src, i64 %index) {
   ret i32 %swapped
 }
 
-; Check that volatile accesses do not use LRV, which might access the
-; storage multple times.
-define i32 @f8(i32 *%src) {
-; CHECK-LABEL: f8:
-; CHECK: l [[REG:%r[0-5]]], 0(%r2)
-; CHECK: lrvr %r2, [[REG]]
-; CHECK: br %r14
-  %a = load volatile i32 , i32 *%src
-  %swapped = call i32 @llvm.bswap.i32(i32 %a)
-  ret i32 %swapped
-}
-
 ; Test a case where we spill the source of at least one LRVR.  We want
 ; to use LRV if possible.
-define void @f9(i32 *%ptr) {
-; CHECK-LABEL: f9:
+define i32 @f8(i32 *%ptr0) {
+; CHECK-LABEL: f8:
 ; CHECK: lrv {{%r[0-9]+}}, 16{{[04]}}(%r15)
 ; CHECK: br %r14
-  %val0 = load volatile i32 , i32 *%ptr
-  %val1 = load volatile i32 , i32 *%ptr
-  %val2 = load volatile i32 , i32 *%ptr
-  %val3 = load volatile i32 , i32 *%ptr
-  %val4 = load volatile i32 , i32 *%ptr
-  %val5 = load volatile i32 , i32 *%ptr
-  %val6 = load volatile i32 , i32 *%ptr
-  %val7 = load volatile i32 , i32 *%ptr
-  %val8 = load volatile i32 , i32 *%ptr
-  %val9 = load volatile i32 , i32 *%ptr
-  %val10 = load volatile i32 , i32 *%ptr
-  %val11 = load volatile i32 , i32 *%ptr
-  %val12 = load volatile i32 , i32 *%ptr
-  %val13 = load volatile i32 , i32 *%ptr
-  %val14 = load volatile i32 , i32 *%ptr
-  %val15 = load volatile i32 , i32 *%ptr
+
+  %val0 = call i32 @foo()
+  %val1 = call i32 @foo()
+  %val2 = call i32 @foo()
+  %val3 = call i32 @foo()
+  %val4 = call i32 @foo()
+  %val5 = call i32 @foo()
+  %val6 = call i32 @foo()
+  %val7 = call i32 @foo()
+  %val8 = call i32 @foo()
+  %val9 = call i32 @foo()
 
   %swapped0 = call i32 @llvm.bswap.i32(i32 %val0)
   %swapped1 = call i32 @llvm.bswap.i32(i32 %val1)
@@ -131,46 +114,18 @@ define void @f9(i32 *%ptr) {
   %swapped7 = call i32 @llvm.bswap.i32(i32 %val7)
   %swapped8 = call i32 @llvm.bswap.i32(i32 %val8)
   %swapped9 = call i32 @llvm.bswap.i32(i32 %val9)
-  %swapped10 = call i32 @llvm.bswap.i32(i32 %val10)
-  %swapped11 = call i32 @llvm.bswap.i32(i32 %val11)
-  %swapped12 = call i32 @llvm.bswap.i32(i32 %val12)
-  %swapped13 = call i32 @llvm.bswap.i32(i32 %val13)
-  %swapped14 = call i32 @llvm.bswap.i32(i32 %val14)
-  %swapped15 = call i32 @llvm.bswap.i32(i32 %val15)
 
-  store volatile i32 %val0, i32 *%ptr
-  store volatile i32 %val1, i32 *%ptr
-  store volatile i32 %val2, i32 *%ptr
-  store volatile i32 %val3, i32 *%ptr
-  store volatile i32 %val4, i32 *%ptr
-  store volatile i32 %val5, i32 *%ptr
-  store volatile i32 %val6, i32 *%ptr
-  store volatile i32 %val7, i32 *%ptr
-  store volatile i32 %val8, i32 *%ptr
-  store volatile i32 %val9, i32 *%ptr
-  store volatile i32 %val10, i32 *%ptr
-  store volatile i32 %val11, i32 *%ptr
-  store volatile i32 %val12, i32 *%ptr
-  store volatile i32 %val13, i32 *%ptr
-  store volatile i32 %val14, i32 *%ptr
-  store volatile i32 %val15, i32 *%ptr
+  %ret1 = add i32 %swapped0, %swapped1
+  %ret2 = add i32 %ret1, %swapped2
+  %ret3 = add i32 %ret2, %swapped3
+  %ret4 = add i32 %ret3, %swapped4
+  %ret5 = add i32 %ret4, %swapped5
+  %ret6 = add i32 %ret5, %swapped6
+  %ret7 = add i32 %ret6, %swapped7
+  %ret8 = add i32 %ret7, %swapped8
+  %ret9 = add i32 %ret8, %swapped9
 
-  store volatile i32 %swapped0, i32 *%ptr
-  store volatile i32 %swapped1, i32 *%ptr
-  store volatile i32 %swapped2, i32 *%ptr
-  store volatile i32 %swapped3, i32 *%ptr
-  store volatile i32 %swapped4, i32 *%ptr
-  store volatile i32 %swapped5, i32 *%ptr
-  store volatile i32 %swapped6, i32 *%ptr
-  store volatile i32 %swapped7, i32 *%ptr
-  store volatile i32 %swapped8, i32 *%ptr
-  store volatile i32 %swapped9, i32 *%ptr
-  store volatile i32 %swapped10, i32 *%ptr
-  store volatile i32 %swapped11, i32 *%ptr
-  store volatile i32 %swapped12, i32 *%ptr
-  store volatile i32 %swapped13, i32 *%ptr
-  store volatile i32 %swapped14, i32 *%ptr
-  store volatile i32 %swapped15, i32 *%ptr
-
-  ret void
+  ret i32 %ret9
 }
+
+declare i32 @foo()
diff --git a/llvm/test/CodeGen/SystemZ/bswap-03.ll b/llvm/test/CodeGen/SystemZ/bswap-03.ll
index ea62c4f71df..07bb35e4d08 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-03.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-03.ll
@@ -86,40 +86,23 @@ define i64 @f7(i64 %src, i64 %index) {
   ret i64 %swapped
 }
 
-; Check that volatile accesses do not use LRVG, which might access the
-; storage multple times.
-define i64 @f8(i64 *%src) {
-; CHECK-LABEL: f8:
-; CHECK: lg [[REG:%r[0-5]]], 0(%r2)
-; CHECK: lrvgr %r2, [[REG]]
-; CHECK: br %r14
-  %a = load volatile i64 , i64 *%src
-  %swapped = call i64 @llvm.bswap.i64(i64 %a)
-  ret i64 %swapped
-}
-
 ; Test a case where we spill the source of at least one LRVGR.  We want
 ; to use LRVG if possible.
-define void @f9(i64 *%ptr) {
-; CHECK-LABEL: f9:
+define i64 @f8(i64 *%ptr) {
+; CHECK-LABEL: f8:
 ; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
 ; CHECK: br %r14
-  %val0 = load volatile i64 , i64 *%ptr
-  %val1 = load volatile i64 , i64 *%ptr
-  %val2 = load volatile i64 , i64 *%ptr
-  %val3 = load volatile i64 , i64 *%ptr
-  %val4 = load volatile i64 , i64 *%ptr
-  %val5 = load volatile i64 , i64 *%ptr
-  %val6 = load volatile i64 , i64 *%ptr
-  %val7 = load volatile i64 , i64 *%ptr
-  %val8 = load volatile i64 , i64 *%ptr
-  %val9 = load volatile i64 , i64 *%ptr
-  %val10 = load volatile i64 , i64 *%ptr
-  %val11 = load volatile i64 , i64 *%ptr
-  %val12 = load volatile i64 , i64 *%ptr
-  %val13 = load volatile i64 , i64 *%ptr
-  %val14 = load volatile i64 , i64 *%ptr
-  %val15 = load volatile i64 , i64 *%ptr
+
+  %val0 = call i64 @foo()
+  %val1 = call i64 @foo()
+  %val2 = call i64 @foo()
+  %val3 = call i64 @foo()
+  %val4 = call i64 @foo()
+  %val5 = call i64 @foo()
+  %val6 = call i64 @foo()
+  %val7 = call i64 @foo()
+  %val8 = call i64 @foo()
+  %val9 = call i64 @foo()
 
   %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
   %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
@@ -131,46 +114,18 @@ define void @f9(i64 *%ptr) {
   %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
   %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
   %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)
-  %swapped10 = call i64 @llvm.bswap.i64(i64 %val10)
-  %swapped11 = call i64 @llvm.bswap.i64(i64 %val11)
-  %swapped12 = call i64 @llvm.bswap.i64(i64 %val12)
-  %swapped13 = call i64 @llvm.bswap.i64(i64 %val13)
-  %swapped14 = call i64 @llvm.bswap.i64(i64 %val14)
-  %swapped15 = call i64 @llvm.bswap.i64(i64 %val15)
 
-  store volatile i64 %val0, i64 *%ptr
-  store volatile i64 %val1, i64 *%ptr
-  store volatile i64 %val2, i64 *%ptr
-  store volatile i64 %val3, i64 *%ptr
-  store volatile i64 %val4, i64 *%ptr
-  store volatile i64 %val5, i64 *%ptr
-  store volatile i64 %val6, i64 *%ptr
-  store volatile i64 %val7, i64 *%ptr
-  store volatile i64 %val8, i64 *%ptr
-  store volatile i64 %val9, i64 *%ptr
-  store volatile i64 %val10, i64 *%ptr
-  store volatile i64 %val11, i64 *%ptr
-  store volatile i64 %val12, i64 *%ptr
-  store volatile i64 %val13, i64 *%ptr
-  store volatile i64 %val14, i64 *%ptr
-  store volatile i64 %val15, i64 *%ptr
+  %ret1 = add i64 %swapped0, %swapped1
+  %ret2 = add i64 %ret1, %swapped2
+  %ret3 = add i64 %ret2, %swapped3
+  %ret4 = add i64 %ret3, %swapped4
+  %ret5 = add i64 %ret4, %swapped5
+  %ret6 = add i64 %ret5, %swapped6
+  %ret7 = add i64 %ret6, %swapped7
+  %ret8 = add i64 %ret7, %swapped8
+  %ret9 = add i64 %ret8, %swapped9
 
-  store volatile i64 %swapped0, i64 *%ptr
-  store volatile i64 %swapped1, i64 *%ptr
-  store volatile i64 %swapped2, i64 *%ptr
-  store volatile i64 %swapped3, i64 *%ptr
-  store volatile i64 %swapped4, i64 *%ptr
-  store volatile i64 %swapped5, i64 *%ptr
-  store volatile i64 %swapped6, i64 *%ptr
-  store volatile i64 %swapped7, i64 *%ptr
-  store volatile i64 %swapped8, i64 *%ptr
-  store volatile i64 %swapped9, i64 *%ptr
-  store volatile i64 %swapped10, i64 *%ptr
-  store volatile i64 %swapped11, i64 *%ptr
-  store volatile i64 %swapped12, i64 *%ptr
-  store volatile i64 %swapped13, i64 *%ptr
-  store volatile i64 %swapped14, i64 *%ptr
-  store volatile i64 %swapped15, i64 *%ptr
-
-  ret void
+  ret i64 %ret9
 }
+
+declare i64 @foo()
diff --git a/llvm/test/CodeGen/SystemZ/bswap-04.ll b/llvm/test/CodeGen/SystemZ/bswap-04.ll
index ce4395210f1..f976ce65b61 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-04.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-04.ll
@@ -86,14 +86,3 @@ define void @f7(i64 %src, i64 %index, i32 %a) {
   ret void
 }
 
-; Check that volatile stores do not use STRV, which might access the
-; storage multple times.
-define void @f8(i32 *%dst, i32 %a) {
-; CHECK-LABEL: f8:
-; CHECK: lrvr [[REG:%r[0-5]]], %r3
-; CHECK: st [[REG]], 0(%r2)
-; CHECK: br %r14
-  %swapped = call i32 @llvm.bswap.i32(i32 %a)
-  store volatile i32 %swapped, i32 *%dst
-  ret void
-}
diff --git a/llvm/test/CodeGen/SystemZ/bswap-05.ll b/llvm/test/CodeGen/SystemZ/bswap-05.ll
index 5f90ef3b9b6..ece3f3181d7 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-05.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-05.ll
@@ -86,14 +86,3 @@ define void @f7(i64 %src, i64 %index, i64 %a) {
   ret void
 }
 
-; Check that volatile stores do not use STRVG, which might access the
-; storage multple times.
-define void @f8(i64 *%dst, i64 %a) {
-; CHECK-LABEL: f8:
-; CHECK: lrvgr [[REG:%r[0-5]]], %r3
-; CHECK: stg [[REG]], 0(%r2)
-; CHECK: br %r14
-  %swapped = call i64 @llvm.bswap.i64(i64 %a)
-  store volatile i64 %swapped, i64 *%dst
-  ret void
-}
diff --git a/llvm/test/CodeGen/SystemZ/bswap-06.ll b/llvm/test/CodeGen/SystemZ/bswap-06.ll
index 19aafe2ca17..a0d379ab391 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-06.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-06.ll
@@ -86,14 +86,3 @@ define i16 @f7(i64 %src, i64 %index) {
   ret i16 %swapped
 }
 
-; Check that volatile accesses do not use LRVH, which might access the
-; storage multple times.
-define i16 @f8(i16 *%src) {
-; CHECK-LABEL: f8:
-; CHECK: lh [[REG:%r[0-5]]], 0(%r2)
-; CHECK: lrvr %r2, [[REG]]
-; CHECK: br %r14
-  %a = load volatile i16 , i16 *%src
-  %swapped = call i16 @llvm.bswap.i16(i16 %a)
-  ret i16 %swapped
-}
diff --git a/llvm/test/CodeGen/SystemZ/bswap-07.ll b/llvm/test/CodeGen/SystemZ/bswap-07.ll
index 7f0a265de75..9882006ba58 100644
--- a/llvm/test/CodeGen/SystemZ/bswap-07.ll
+++ b/llvm/test/CodeGen/SystemZ/bswap-07.ll
@@ -86,15 +86,3 @@ define void @f7(i64 %src, i64 %index, i16 %a) {
   ret void
 }
 
-; Check that volatile stores do not use STRVH, which might access the
-; storage multple times.
-define void @f8(i16 *%dst, i16 %a) {
-; CHECK-LABEL: f8:
-; CHECK: lrvr [[REG:%r[0-5]]], %r3
-; CHECK: srl [[REG]], 16
-; CHECK: sth [[REG]], 0(%r2)
-; CHECK: br %r14
-  %swapped = call i16 @llvm.bswap.i16(i16 %a)
-  store volatile i16 %swapped, i16 *%dst
-  ret void
-}
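
The pattern the rewritten f8 tests rely on, sketched below as a standalone module (not part of this commit; @foo and the function name are placeholders): call results that stay live across later calls eventually exhaust the callee-saved registers, so at least one value is spilled, and the reload feeding a bswap can then be folded into a single lrv/lrvg instead of a load followed by lrvr/lrvgr.

; Minimal sketch, assuming SystemZ codegen (llc -mtriple=s390x-linux-gnu).
; With only two live values no spill may actually occur; the committed tests
; keep ten call results live to force at least one onto the stack.
declare i32 @foo()
declare i32 @llvm.bswap.i32(i32)

define i32 @spill_then_swap() {
  %a = call i32 @foo()
  %b = call i32 @foo()                      ; %a is live across this call
  %sa = call i32 @llvm.bswap.i32(i32 %a)    ; reload + swap can fold into lrv
  %sb = call i32 @llvm.bswap.i32(i32 %b)
  %sum = add i32 %sa, %sb
  ret i32 %sum
}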