Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/PowerPC/atomic-2.ll           |  6
-rw-r--r-- | llvm/test/CodeGen/PowerPC/atomics-indexed.ll    | 14
-rw-r--r-- | llvm/test/CodeGen/PowerPC/atomics-regression.ll | 64
-rw-r--r-- | llvm/test/CodeGen/PowerPC/atomics.ll            | 14
4 files changed, 80 insertions, 18 deletions
diff --git a/llvm/test/CodeGen/PowerPC/atomic-2.ll b/llvm/test/CodeGen/PowerPC/atomic-2.ll
index 18715ddb37c..f61ca3d5cbe 100644
--- a/llvm/test/CodeGen/PowerPC/atomic-2.ll
+++ b/llvm/test/CodeGen/PowerPC/atomic-2.ll
@@ -108,8 +108,10 @@ entry:
 ; CHECK: @atomic_load
   %tmp = load atomic i64, i64* %mem acquire, align 64
 ; CHECK-NOT: ldarx
-; CHECK: ld
-; CHECK: lwsync
+; CHECK: ld [[VAL:[0-9]+]]
+; CHECK: cmpw [[CR:[0-9]+]], [[VAL]], [[VAL]]
+; CHECK: bne- [[CR]], .+4
+; CHECK: isync
   ret i64 %tmp
 }
diff --git a/llvm/test/CodeGen/PowerPC/atomics-indexed.ll b/llvm/test/CodeGen/PowerPC/atomics-indexed.ll
index 7a0dde034d6..cfe15f0061c 100644
--- a/llvm/test/CodeGen/PowerPC/atomics-indexed.ll
+++ b/llvm/test/CodeGen/PowerPC/atomics-indexed.ll
@@ -10,16 +10,22 @@ define i8 @load_x_i8_seq_cst([100000 x i8]* %mem) {
 ; CHECK-LABEL: load_x_i8_seq_cst
 ; CHECK: sync
-; CHECK: lbzx
-; CHECK: lwsync
+; CHECK: lbzx [[VAL:r[0-9]+]]
+; CHECK-PPC32: lwsync
+; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
+; CHECK-PPC64: bne- [[CR]], .+4
+; CHECK-PPC64: isync
   %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
   %val = load atomic i8, i8* %ptr seq_cst, align 1
   ret i8 %val
 }
 define i16 @load_x_i16_acquire([100000 x i16]* %mem) {
 ; CHECK-LABEL: load_x_i16_acquire
-; CHECK: lhzx
-; CHECK: lwsync
+; CHECK: lhzx [[VAL:r[0-9]+]]
+; CHECK-PPC32: lwsync
+; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
+; CHECK-PPC64: bne- [[CR]], .+4
+; CHECK-PPC64: isync
   %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
   %val = load atomic i16, i16* %ptr acquire, align 2
   ret i16 %val
diff --git a/llvm/test/CodeGen/PowerPC/atomics-regression.ll b/llvm/test/CodeGen/PowerPC/atomics-regression.ll
index 9af82b62553..054d3a4146b 100644
--- a/llvm/test/CodeGen/PowerPC/atomics-regression.ll
+++ b/llvm/test/CodeGen/PowerPC/atomics-regression.ll
@@ -23,7 +23,9 @@ define i8 @test2(i8* %ptr) {
 ; PPC64LE-LABEL: test2:
 ; PPC64LE: # BB#0:
 ; PPC64LE-NEXT: lbz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i8, i8* %ptr acquire, align 1
   ret i8 %val
@@ -35,7 +37,9 @@ define i8 @test3(i8* %ptr) {
 ; PPC64LE-NEXT: sync
 ; PPC64LE-NEXT: ori 2, 2, 0
 ; PPC64LE-NEXT: lbz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i8, i8* %ptr seq_cst, align 1
   ret i8 %val
@@ -63,7 +67,9 @@ define i16 @test6(i16* %ptr) {
 ; PPC64LE-LABEL: test6:
 ; PPC64LE: # BB#0:
 ; PPC64LE-NEXT: lhz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i16, i16* %ptr acquire, align 2
   ret i16 %val
@@ -75,7 +81,9 @@ define i16 @test7(i16* %ptr) {
 ; PPC64LE-NEXT: sync
 ; PPC64LE-NEXT: ori 2, 2, 0
 ; PPC64LE-NEXT: lhz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i16, i16* %ptr seq_cst, align 2
   ret i16 %val
@@ -103,7 +111,9 @@ define i32 @test10(i32* %ptr) {
 ; PPC64LE-LABEL: test10:
 ; PPC64LE: # BB#0:
 ; PPC64LE-NEXT: lwz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i32, i32* %ptr acquire, align 4
   ret i32 %val
@@ -115,7 +125,9 @@ define i32 @test11(i32* %ptr) {
 ; PPC64LE-NEXT: sync
 ; PPC64LE-NEXT: ori 2, 2, 0
 ; PPC64LE-NEXT: lwz 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i32, i32* %ptr seq_cst, align 4
   ret i32 %val
@@ -143,7 +155,9 @@ define i64 @test14(i64* %ptr) {
 ; PPC64LE-LABEL: test14:
 ; PPC64LE: # BB#0:
 ; PPC64LE-NEXT: ld 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i64, i64* %ptr acquire, align 8
   ret i64 %val
@@ -155,7 +169,9 @@ define i64 @test15(i64* %ptr) {
 ; PPC64LE-NEXT: sync
 ; PPC64LE-NEXT: ori 2, 2, 0
 ; PPC64LE-NEXT: ld 3, 0(3)
-; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
 ; PPC64LE-NEXT: blr
   %val = load atomic i64, i64* %ptr seq_cst, align 8
   ret i64 %val
@@ -9544,3 +9560,35 @@ define i64 @test559(i64* %ptr, i64 %val) {
   %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
   ret i64 %ret
 }
+
+; The second load should never be scheduled before isync.
+define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) {
+; PPC64LE-LABEL: test_ordering0:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 4, 0(3)
+; PPC64LE-NEXT: cmpw 7, 4, 4
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: add 3, 4, 3
+; PPC64LE-NEXT: blr
+  %val1 = load atomic i32, i32* %ptr1 acquire, align 4
+  %val2 = load i32, i32* %ptr1
+  %add = add i32 %val1, %val2
+  ret i32 %add
+}
+
+; The second store should never be scheduled before isync.
+define i32 @test_ordering1(i32* %ptr1, i32 %val1, i32* %ptr2) {
+; PPC64LE-LABEL: test_ordering1:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: cmpw 7, 3, 3
+; PPC64LE-NEXT: bne- 7, .+4
+; PPC64LE-NEXT: isync
+; PPC64LE-NEXT: stw 4, 0(5)
+; PPC64LE-NEXT: blr
+  %val2 = load atomic i32, i32* %ptr1 acquire, align 4
+  store i32 %val1, i32* %ptr2
+  ret i32 %val2
+}
diff --git a/llvm/test/CodeGen/PowerPC/atomics.ll b/llvm/test/CodeGen/PowerPC/atomics.ll
index 2e1eff0f634..61d54534f5f 100644
--- a/llvm/test/CodeGen/PowerPC/atomics.ll
+++ b/llvm/test/CodeGen/PowerPC/atomics.ll
@@ -25,9 +25,12 @@ define i16 @load_i16_monotonic(i16* %mem) {
 }
 define i32 @load_i32_acquire(i32* %mem) {
 ; CHECK-LABEL: load_i32_acquire
-; CHECK: lwz
+; CHECK: lwz [[VAL:r[0-9]+]]
   %val = load atomic i32, i32* %mem acquire, align 4
-; CHECK: lwsync
+; CHECK-PPC32: lwsync
+; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
+; CHECK-PPC64: bne- [[CR]], .+4
+; CHECK-PPC64: isync
   ret i32 %val
 }
 define i64 @load_i64_seq_cst(i64* %mem) {
@@ -35,9 +38,12 @@ define i64 @load_i64_seq_cst(i64* %mem) {
 ; CHECK: sync
 ; PPC32: __sync_
 ; PPC64-NOT: __sync_
-; PPC64: ld
+; PPC64: ld [[VAL:r[0-9]+]]
   %val = load atomic i64, i64* %mem seq_cst, align 8
-; CHECK: lwsync
+; CHECK-PPC32: lwsync
+; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
+; CHECK-PPC64: bne- [[CR]], .+4
+; CHECK-PPC64: isync
   ret i64 %val
 }
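For reference, the idiom the updated checks encode is: on 64-bit PowerPC the trailing lwsync after an acquire (or seq_cst) load is replaced by a cmpw of the loaded value against itself, a conditional branch to the next instruction, and an isync, which together tie the ordering of later memory accesses to that particular load. A minimal standalone test in the same style as the files above is sketched below, assuming the post-patch behavior shown in the hunks; the RUN line, triple, function name, and register patterns are illustrative and not taken from this commit.

  ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
  ; Hypothetical example, not part of this commit: the acquire load should be
  ; followed by the cmpw/bne-/isync sequence rather than lwsync.
  define i32 @acquire_load_example(i32* %p) {
  ; CHECK-LABEL: acquire_load_example
  ; CHECK: lwz [[VAL:[0-9]+]]
  ; CHECK-NOT: lwsync
  ; CHECK: cmpw [[CR:[0-9]+]], [[VAL]], [[VAL]]
  ; CHECK: bne- [[CR]], .+4
  ; CHECK: isync
    %v = load atomic i32, i32* %p acquire, align 4
    ret i32 %v
  }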