| field | value | date |
|---|---|---|
| author | Chris Lattner <sabre@nondot.org> | 2011-11-27 06:54:59 +0000 |
| committer | Chris Lattner <sabre@nondot.org> | 2011-11-27 06:54:59 +0000 |
| commit | 6a144a2227f8aaf6d4c49bc22f5424a34b778166 (patch) | |
| tree | 3a12469469c418eae050475d7c2cce599c3c5a67 /llvm/test/CodeGen/XCore | |
| parent | ebed15e973d903cfe012bd0875840499602e2f44 (diff) | |
Upgrade syntax of tests using volatile instructions to use 'load volatile' instead of 'volatile load', which is archaic.
llvm-svn: 145171
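
For reference, the change is purely syntactic: the `volatile` marker moves from before the opcode to after it. A minimal sketch in the typed-pointer IR syntax of this era (the global and function names here are illustrative, not taken from the commit):

```llvm
@flag = global i32 0

; Archaic spelling being replaced by this commit:
;   %v = volatile load i32* @flag, align 4
;   volatile store i32 %v, i32* @flag, align 4

; Current spelling that the tests are upgraded to:
define i32 @read_and_write() {
entry:
  %v = load volatile i32* @flag, align 4       ; 'volatile' now follows the opcode
  store volatile i32 %v, i32* @flag, align 4
  ret i32 %v
}
```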
Diffstat (limited to 'llvm/test/CodeGen/XCore')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/XCore/licm-ldwcp.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/XCore/scavenging.ll | 48 |
2 files changed, 25 insertions, 25 deletions
```diff
diff --git a/llvm/test/CodeGen/XCore/licm-ldwcp.ll b/llvm/test/CodeGen/XCore/licm-ldwcp.ll
index 4884f70e736..794c6bb64e3 100644
--- a/llvm/test/CodeGen/XCore/licm-ldwcp.ll
+++ b/llvm/test/CodeGen/XCore/licm-ldwcp.ll
@@ -13,6 +13,6 @@ entry:
   br label %bb
 
 bb:                                               ; preds = %bb, %entry
-  volatile store i32 525509670, i32* %p, align 4
+  store volatile i32 525509670, i32* %p, align 4
   br label %bb
 }
diff --git a/llvm/test/CodeGen/XCore/scavenging.ll b/llvm/test/CodeGen/XCore/scavenging.ll
index 3181e96116b..5b612d0f9b5 100644
--- a/llvm/test/CodeGen/XCore/scavenging.ll
+++ b/llvm/test/CodeGen/XCore/scavenging.ll
@@ -18,32 +18,32 @@ entry:
   %x = alloca [100 x i32], align 4              ; <[100 x i32]*> [#uses=2]
   %0 = load i32* @size, align 4                 ; <i32> [#uses=1]
   %1 = alloca i32, i32 %0, align 4              ; <i32*> [#uses=1]
-  %2 = volatile load i32* @g0, align 4          ; <i32> [#uses=1]
-  %3 = volatile load i32* @g1, align 4          ; <i32> [#uses=1]
-  %4 = volatile load i32* @g2, align 4          ; <i32> [#uses=1]
-  %5 = volatile load i32* @g3, align 4          ; <i32> [#uses=1]
-  %6 = volatile load i32* @g4, align 4          ; <i32> [#uses=1]
-  %7 = volatile load i32* @g5, align 4          ; <i32> [#uses=1]
-  %8 = volatile load i32* @g6, align 4          ; <i32> [#uses=1]
-  %9 = volatile load i32* @g7, align 4          ; <i32> [#uses=1]
-  %10 = volatile load i32* @g8, align 4         ; <i32> [#uses=1]
-  %11 = volatile load i32* @g9, align 4         ; <i32> [#uses=1]
-  %12 = volatile load i32* @g10, align 4        ; <i32> [#uses=1]
-  %13 = volatile load i32* @g11, align 4        ; <i32> [#uses=2]
+  %2 = load volatile i32* @g0, align 4          ; <i32> [#uses=1]
+  %3 = load volatile i32* @g1, align 4          ; <i32> [#uses=1]
+  %4 = load volatile i32* @g2, align 4          ; <i32> [#uses=1]
+  %5 = load volatile i32* @g3, align 4          ; <i32> [#uses=1]
+  %6 = load volatile i32* @g4, align 4          ; <i32> [#uses=1]
+  %7 = load volatile i32* @g5, align 4          ; <i32> [#uses=1]
+  %8 = load volatile i32* @g6, align 4          ; <i32> [#uses=1]
+  %9 = load volatile i32* @g7, align 4          ; <i32> [#uses=1]
+  %10 = load volatile i32* @g8, align 4         ; <i32> [#uses=1]
+  %11 = load volatile i32* @g9, align 4         ; <i32> [#uses=1]
+  %12 = load volatile i32* @g10, align 4        ; <i32> [#uses=1]
+  %13 = load volatile i32* @g11, align 4        ; <i32> [#uses=2]
   %14 = getelementptr [100 x i32]* %x, i32 0, i32 50 ; <i32*> [#uses=1]
   store i32 %13, i32* %14, align 4
-  volatile store i32 %13, i32* @g11, align 4
-  volatile store i32 %12, i32* @g10, align 4
-  volatile store i32 %11, i32* @g9, align 4
-  volatile store i32 %10, i32* @g8, align 4
-  volatile store i32 %9, i32* @g7, align 4
-  volatile store i32 %8, i32* @g6, align 4
-  volatile store i32 %7, i32* @g5, align 4
-  volatile store i32 %6, i32* @g4, align 4
-  volatile store i32 %5, i32* @g3, align 4
-  volatile store i32 %4, i32* @g2, align 4
-  volatile store i32 %3, i32* @g1, align 4
-  volatile store i32 %2, i32* @g0, align 4
+  store volatile i32 %13, i32* @g11, align 4
+  store volatile i32 %12, i32* @g10, align 4
+  store volatile i32 %11, i32* @g9, align 4
+  store volatile i32 %10, i32* @g8, align 4
+  store volatile i32 %9, i32* @g7, align 4
+  store volatile i32 %8, i32* @g6, align 4
+  store volatile i32 %7, i32* @g5, align 4
+  store volatile i32 %6, i32* @g4, align 4
+  store volatile i32 %5, i32* @g3, align 4
+  store volatile i32 %4, i32* @g2, align 4
+  store volatile i32 %3, i32* @g1, align 4
+  store volatile i32 %2, i32* @g0, align 4
   %x1 = getelementptr [100 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
   call void @g(i32* %x1, i32* %1) nounwind
   ret void
```

