diff options
author | Nirav Dave <niravd@google.com> | 2017-07-05 13:08:03 +0000 |
---|---|---|
committer | Nirav Dave <niravd@google.com> | 2017-07-05 13:08:03 +0000 |
commit | 65b7ab1be45bc9719c885fc0da0b5b92db064b7c (patch) | |
tree | 7a931ccec3c290b8d184fd2189e5483c04ed26dd | |
parent | 6a9570c2826a754b779b3bb9446e86edb8cc1d84 (diff) | |
download | bcm5719-llvm-65b7ab1be45bc9719c885fc0da0b5b92db064b7c.tar.gz bcm5719-llvm-65b7ab1be45bc9719c885fc0da0b5b92db064b7c.zip |
[Hexagon] Preclude non-memory test from being optimized away. NFC.
llvm-svn: 307153
-rw-r--r-- | llvm/test/CodeGen/Hexagon/convertdptoint.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/convertdptoll.ll | 4 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/convertsptoint.ll | 4 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/convertsptoll.ll | 4 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/dadd.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/dmul.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/dsub.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/fadd.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/fmul.ll | 8 | ||||
-rw-r--r-- | llvm/test/CodeGen/Hexagon/fsub.ll | 8 |
11 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/llvm/test/CodeGen/Hexagon/convertdptoint.ll b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
index a09c2fd14b1..adf76e5dc82 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
@@ -12,10 +12,10 @@ entry:
   %b = alloca double, align 8
   %c = alloca double, align 8
   store i32 0, i32* %retval
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   %2 = load double, double* %c, align 8
diff --git a/llvm/test/CodeGen/Hexagon/convertdptoll.ll b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
index f46d46cf76b..6b5bf56a248 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
@@ -17,8 +17,8 @@ entry:
   %0 = load double, double* %a, align 8
   %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
-  store double %add, double* %c, align 8
-  %2 = load double, double* %c, align 8
+  store volatile double %add, double* %c, align 8
+  %2 = load volatile double, double* %c, align 8
   %conv = fptosi double %2 to i64
   store i64 %conv, i64* %i, align 8
   %3 = load i64, i64* %i, align 8
diff --git a/llvm/test/CodeGen/Hexagon/convertsptoint.ll b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
index 7593e57d852..939b3b06a8c 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
@@ -17,8 +17,8 @@ entry:
   %0 = load float, float* %a, align 4
   %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
-  store float %add, float* %c, align 4
-  %2 = load float, float* %c, align 4
+  store volatile float %add, float* %c, align 4
+  %2 = load volatile float, float* %c, align 4
   %conv = fptosi float %2 to i32
   store i32 %conv, i32* %i, align 4
   %3 = load i32, i32* %i, align 4
diff --git a/llvm/test/CodeGen/Hexagon/convertsptoll.ll b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
index d8432cbc812..f540397ccf5 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
@@ -17,8 +17,8 @@ entry:
   %0 = load float, float* %a, align 4
   %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
-  store float %add, float* %c, align 4
-  %2 = load float, float* %c, align 4
+  store volatile float %add, float* %c, align 4
+  %2 = load volatile float, float* %c, align 4
   %conv = fptosi float %2 to i64
   store i64 %conv, i64* %i, align 8
   %3 = load i64, i64* %i, align 8
diff --git a/llvm/test/CodeGen/Hexagon/dadd.ll b/llvm/test/CodeGen/Hexagon/dadd.ll
index 5fcd705bab2..3068f499d12 100644
--- a/llvm/test/CodeGen/Hexagon/dadd.ll
+++ b/llvm/test/CodeGen/Hexagon/dadd.ll
@@ -9,10 +9,10 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/dmul.ll b/llvm/test/CodeGen/Hexagon/dmul.ll
index 1b79e0aa7d7..a6cf62b0c0a 100644
--- a/llvm/test/CodeGen/Hexagon/dmul.ll
+++ b/llvm/test/CodeGen/Hexagon/dmul.ll
@@ -8,10 +8,10 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %b, align 8
-  %1 = load double, double* %a, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %b, align 8
+  %1 = load volatile double, double* %a, align 8
   %mul = fmul double %0, %1
   store double %mul, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
index 6bf8224904e..ccc287c5f2b 100644
--- a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
+++ b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
@@ -12,10 +12,10 @@ entry:
   %b = alloca double, align 8
   %c = alloca double, align 8
   store i32 0, i32* %retval
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   %2 = load double, double* %c, align 8
diff --git a/llvm/test/CodeGen/Hexagon/dsub.ll b/llvm/test/CodeGen/Hexagon/dsub.ll
index 8b37301d84f..d7e44b307cf 100644
--- a/llvm/test/CodeGen/Hexagon/dsub.ll
+++ b/llvm/test/CodeGen/Hexagon/dsub.ll
@@ -8,10 +8,10 @@ entry:
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %b, align 8
-  %1 = load double, double* %a, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %b, align 8
+  %1 = load volatile double, double* %a, align 8
   %sub = fsub double %0, %1
   store double %sub, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fadd.ll b/llvm/test/CodeGen/Hexagon/fadd.ll
index 0418c1724f5..65c6182dcc7 100644
--- a/llvm/test/CodeGen/Hexagon/fadd.ll
+++ b/llvm/test/CodeGen/Hexagon/fadd.ll
@@ -8,10 +8,10 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %a, align 4
-  %1 = load float, float* %b, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %a, align 4
+  %1 = load volatile float, float* %b, align 4
   %add = fadd float %0, %1
   store float %add, float* %c, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fmul.ll b/llvm/test/CodeGen/Hexagon/fmul.ll
index 552f98ec7a5..e20e293c0a1 100644
--- a/llvm/test/CodeGen/Hexagon/fmul.ll
+++ b/llvm/test/CodeGen/Hexagon/fmul.ll
@@ -9,10 +9,10 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %b, align 4
-  %1 = load float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %b, align 4
+  %1 = load volatile float, float* %a, align 4
   %mul = fmul float %0, %1
   store float %mul, float* %c, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fsub.ll b/llvm/test/CodeGen/Hexagon/fsub.ll
index d7b0e2f65b3..e9a1fa3d192 100644
--- a/llvm/test/CodeGen/Hexagon/fsub.ll
+++ b/llvm/test/CodeGen/Hexagon/fsub.ll
@@ -8,10 +8,10 @@ entry:
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %b, align 4
-  %1 = load float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %b, align 4
+  %1 = load volatile float, float* %a, align 4
   %sub = fsub float %0, %1
   store float %sub, float* %c, align 4
   ret i32 0