Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/SPARC/atomics.ll                  | 132
-rw-r--r--  llvm/test/Transforms/AtomicExpand/SPARC/partword.ll | 166
2 files changed, 296 insertions, 2 deletions
diff --git a/llvm/test/CodeGen/SPARC/atomics.ll b/llvm/test/CodeGen/SPARC/atomics.ll
index 6fe333a4be8..5e608e728c3 100644
--- a/llvm/test/CodeGen/SPARC/atomics.ll
+++ b/llvm/test/CodeGen/SPARC/atomics.ll
@@ -64,6 +64,90 @@ entry:
ret i64 %2
}
+;; TODO: the "move %icc" and related instructions are totally
+;; redundant here. There's something weird happening in optimization
+;; of the success value of cmpxchg.
+
+; CHECK-LABEL: test_cmpxchg_i8
+; CHECK: and %o1, -4, %o2
+; CHECK: mov 3, %o3
+; CHECK: andn %o3, %o1, %o1
+; CHECK: sll %o1, 3, %o1
+; CHECK: mov 255, %o3
+; CHECK: sll %o3, %o1, %o5
+; CHECK: xor %o5, -1, %o3
+; CHECK: mov 123, %o4
+; CHECK: ld [%o2], %g2
+; CHECK: sll %o4, %o1, %o4
+; CHECK: and %o0, 255, %o0
+; CHECK: sll %o0, %o1, %o0
+; CHECK: andn %g2, %o5, %g2
+; CHECK: sethi 0, %o5
+; CHECK: [[LABEL1:\.L.*]]:
+; CHECK: or %g2, %o4, %g3
+; CHECK: or %g2, %o0, %g4
+; CHECK: cas [%o2], %g4, %g3
+; CHECK: cmp %g3, %g4
+; CHECK: mov %o5, %g4
+; CHECK: move %icc, 1, %g4
+; CHECK: cmp %g4, 0
+; CHECK: bne [[LABEL2:\.L.*]]
+; CHECK: nop
+; CHECK: and %g3, %o3, %g4
+; CHECK: cmp %g2, %g4
+; CHECK: bne [[LABEL1]]
+; CHECK: mov %g4, %g2
+; CHECK: [[LABEL2]]:
+; CHECK: retl
+; CHECK: srl %g3, %o1, %o0
+define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
+entry:
+ %pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
+ %b = extractvalue { i8, i1 } %pair, 0
+ ret i8 %b
+}
+
+; CHECK-LABEL: test_cmpxchg_i16
+
+; CHECK: and %o1, -4, %o2
+; CHECK: and %o1, 3, %o1
+; CHECK: xor %o1, 2, %o1
+; CHECK: sll %o1, 3, %o1
+; CHECK: sethi 63, %o3
+; CHECK: or %o3, 1023, %o4
+; CHECK: sll %o4, %o1, %o5
+; CHECK: xor %o5, -1, %o3
+; CHECK: and %o0, %o4, %o4
+; CHECK: ld [%o2], %g2
+; CHECK: mov 123, %o0
+; CHECK: sll %o0, %o1, %o0
+; CHECK: sll %o4, %o1, %o4
+; CHECK: andn %g2, %o5, %g2
+; CHECK: sethi 0, %o5
+; CHECK: [[LABEL1:\.L.*]]:
+; CHECK: or %g2, %o0, %g3
+; CHECK: or %g2, %o4, %g4
+; CHECK: cas [%o2], %g4, %g3
+; CHECK: cmp %g3, %g4
+; CHECK: mov %o5, %g4
+; CHECK: move %icc, 1, %g4
+; CHECK: cmp %g4, 0
+; CHECK: bne [[LABEL2:\.L.*]]
+; CHECK: nop
+; CHECK: and %g3, %o3, %g4
+; CHECK: cmp %g2, %g4
+; CHECK: bne [[LABEL1]]
+; CHECK: mov %g4, %g2
+; CHECK: [[LABEL2]]:
+; CHECK: retl
+; CHECK: srl %g3, %o1, %o0
+define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
+entry:
+ %pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
+ %b = extractvalue { i16, i1 } %pair, 0
+ ret i16 %b
+}
+
; CHECK-LABEL: test_cmpxchg_i32
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: cas [%o1], %o0, [[R]]
@@ -86,6 +170,26 @@ entry:
ret i64 %b
}
+; CHECK-LABEL: test_swap_i8
+; CHECK: mov 42, [[R:%[gilo][0-7]]]
+; CHECK: cas
+
+define i8 @test_swap_i8(i8 %a, i8* %ptr) {
+entry:
+ %b = atomicrmw xchg i8* %ptr, i8 42 monotonic
+ ret i8 %b
+}
+
+; CHECK-LABEL: test_swap_i16
+; CHECK: mov 42, [[R:%[gilo][0-7]]]
+; CHECK: cas
+
+define i16 @test_swap_i16(i16 %a, i16* %ptr) {
+entry:
+ %b = atomicrmw xchg i16* %ptr, i16 42 monotonic
+ ret i16 %b
+}
+
; CHECK-LABEL: test_swap_i32
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: swap [%o1], [[R]]
@@ -105,12 +209,36 @@ entry:
ret i64 %b
}
-; CHECK-LABEL: test_load_add_32
+; CHECK-LABEL: test_load_sub_i8
+; CHECK: membar
+; CHECK: .L{{.*}}:
+; CHECK: sub
+; CHECK: cas [{{%[gilo][0-7]}}]
+; CHECK: membar
+define zeroext i8 @test_load_sub_i8(i8* %p, i8 zeroext %v) {
+entry:
+ %0 = atomicrmw sub i8* %p, i8 %v seq_cst
+ ret i8 %0
+}
+
+; CHECK-LABEL: test_load_sub_i16
+; CHECK: membar
+; CHECK: .L{{.*}}:
+; CHECK: sub
+; CHECK: cas [{{%[gilo][0-7]}}]
+; CHECK: membar
+define zeroext i16 @test_load_sub_i16(i16* %p, i16 zeroext %v) {
+entry:
+ %0 = atomicrmw sub i16* %p, i16 %v seq_cst
+ ret i16 %0
+}
+
+; CHECK-LABEL: test_load_add_i32
; CHECK: membar
; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[U]]
; CHECK: membar
-define zeroext i32 @test_load_add_32(i32* %p, i32 zeroext %v) {
+define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:
%0 = atomicrmw add i32* %p, i32 %v seq_cst
ret i32 %0
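Aside, not part of the patch: the new CodeGen checks above correspond to the IR a C front end emits for ordinary sub-word atomic builtins. A minimal sketch, assuming the GCC/Clang __atomic builtins; the function names are made up for illustration:

#include <stdint.h>

/* Illustrative only: source whose sub-word atomics lower to the IR tested
 * above.  On SPARC, which has no byte/halfword compare-and-swap, the backend
 * expands both into the masked 32-bit `cas` loops the CHECK lines match. */
uint8_t fetch_sub_u8(uint8_t *p, uint8_t v) {
  return __atomic_fetch_sub(p, v, __ATOMIC_SEQ_CST);            /* cf. test_load_sub_i8 */
}

uint16_t xchg_u16(uint16_t *p) {
  return __atomic_exchange_n(p, (uint16_t)42, __ATOMIC_RELAXED); /* cf. test_swap_i16 */
}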
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
new file mode 100644
index 00000000000..9963d17c242
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/partword.ll
@@ -0,0 +1,166 @@
+; RUN: opt -S %s -atomic-expand | FileCheck %s
+
+;; Verify the cmpxchg and atomicrmw expansions where sub-word-size
+;; instructions are not available.
+
+;;; NOTE: this test is mostly target-independent -- any target which
+;;; doesn't support cmpxchg of sub-word sizes would do.
+target datalayout = "E-m:e-i64:64-n32:64-S128"
+target triple = "sparcv9-unknown-unknown"
+
+; CHECK-LABEL: @test_cmpxchg_i8(
+; CHECK: fence seq_cst
+; CHECK: %0 = ptrtoint i8* %arg to i64
+; CHECK: %1 = and i64 %0, -4
+; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
+; CHECK: %PtrLSB = and i64 %0, 3
+; CHECK: %2 = xor i64 %PtrLSB, 3
+; CHECK: %3 = shl i64 %2, 3
+; CHECK: %ShiftAmt = trunc i64 %3 to i32
+; CHECK: %Mask = shl i32 255, %ShiftAmt
+; CHECK: %Inv_Mask = xor i32 %Mask, -1
+; CHECK: %4 = zext i8 %new to i32
+; CHECK: %5 = shl i32 %4, %ShiftAmt
+; CHECK: %6 = zext i8 %old to i32
+; CHECK: %7 = shl i32 %6, %ShiftAmt
+; CHECK: %8 = load i32, i32* %AlignedAddr
+; CHECK: %9 = and i32 %8, %Inv_Mask
+; CHECK: br label %partword.cmpxchg.loop
+; CHECK:partword.cmpxchg.loop:
+; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
+; CHECK: %11 = or i32 %10, %5
+; CHECK: %12 = or i32 %10, %7
+; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
+; CHECK: %14 = extractvalue { i32, i1 } %13, 0
+; CHECK: %15 = extractvalue { i32, i1 } %13, 1
+; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
+; CHECK:partword.cmpxchg.failure:
+; CHECK: %16 = and i32 %14, %Inv_Mask
+; CHECK: %17 = icmp ne i32 %10, %16
+; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
+; CHECK:partword.cmpxchg.end:
+; CHECK: %18 = lshr i32 %14, %ShiftAmt
+; CHECK: %19 = trunc i32 %18 to i8
+; CHECK: %20 = insertvalue { i8, i1 } undef, i8 %19, 0
+; CHECK: %21 = insertvalue { i8, i1 } %20, i1 %15, 1
+; CHECK: fence seq_cst
+; CHECK: %ret = extractvalue { i8, i1 } %21, 0
+; CHECK: ret i8 %ret
+define i8 @test_cmpxchg_i8(i8* %arg, i8 %old, i8 %new) {
+entry:
+ %ret_succ = cmpxchg i8* %arg, i8 %old, i8 %new seq_cst monotonic
+ %ret = extractvalue { i8, i1 } %ret_succ, 0
+ ret i8 %ret
+}
+
+; CHECK-LABEL: @test_cmpxchg_i16(
+; CHECK: fence seq_cst
+; CHECK: %0 = ptrtoint i16* %arg to i64
+; CHECK: %1 = and i64 %0, -4
+; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
+; CHECK: %PtrLSB = and i64 %0, 3
+; CHECK: %2 = xor i64 %PtrLSB, 2
+; CHECK: %3 = shl i64 %2, 3
+; CHECK: %ShiftAmt = trunc i64 %3 to i32
+; CHECK: %Mask = shl i32 65535, %ShiftAmt
+; CHECK: %Inv_Mask = xor i32 %Mask, -1
+; CHECK: %4 = zext i16 %new to i32
+; CHECK: %5 = shl i32 %4, %ShiftAmt
+; CHECK: %6 = zext i16 %old to i32
+; CHECK: %7 = shl i32 %6, %ShiftAmt
+; CHECK: %8 = load i32, i32* %AlignedAddr
+; CHECK: %9 = and i32 %8, %Inv_Mask
+; CHECK: br label %partword.cmpxchg.loop
+; CHECK:partword.cmpxchg.loop:
+; CHECK: %10 = phi i32 [ %9, %entry ], [ %16, %partword.cmpxchg.failure ]
+; CHECK: %11 = or i32 %10, %5
+; CHECK: %12 = or i32 %10, %7
+; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %12, i32 %11 monotonic monotonic
+; CHECK: %14 = extractvalue { i32, i1 } %13, 0
+; CHECK: %15 = extractvalue { i32, i1 } %13, 1
+; CHECK: br i1 %15, label %partword.cmpxchg.end, label %partword.cmpxchg.failure
+; CHECK:partword.cmpxchg.failure:
+; CHECK: %16 = and i32 %14, %Inv_Mask
+; CHECK: %17 = icmp ne i32 %10, %16
+; CHECK: br i1 %17, label %partword.cmpxchg.loop, label %partword.cmpxchg.end
+; CHECK:partword.cmpxchg.end:
+; CHECK: %18 = lshr i32 %14, %ShiftAmt
+; CHECK: %19 = trunc i32 %18 to i16
+; CHECK: %20 = insertvalue { i16, i1 } undef, i16 %19, 0
+; CHECK: %21 = insertvalue { i16, i1 } %20, i1 %15, 1
+; CHECK: fence seq_cst
+; CHECK: %ret = extractvalue { i16, i1 } %21, 0
+; CHECK: ret i16 %ret
+define i16 @test_cmpxchg_i16(i16* %arg, i16 %old, i16 %new) {
+entry:
+ %ret_succ = cmpxchg i16* %arg, i16 %old, i16 %new seq_cst monotonic
+ %ret = extractvalue { i16, i1 } %ret_succ, 0
+ ret i16 %ret
+}
+
+
+; CHECK-LABEL: @test_add_i16(
+; CHECK: fence seq_cst
+; CHECK: %0 = ptrtoint i16* %arg to i64
+; CHECK: %1 = and i64 %0, -4
+; CHECK: %AlignedAddr = inttoptr i64 %1 to i32*
+; CHECK: %PtrLSB = and i64 %0, 3
+; CHECK: %2 = xor i64 %PtrLSB, 2
+; CHECK: %3 = shl i64 %2, 3
+; CHECK: %ShiftAmt = trunc i64 %3 to i32
+; CHECK: %Mask = shl i32 65535, %ShiftAmt
+; CHECK: %Inv_Mask = xor i32 %Mask, -1
+; CHECK: %4 = zext i16 %val to i32
+; CHECK: %ValOperand_Shifted = shl i32 %4, %ShiftAmt
+; CHECK: %5 = load i32, i32* %AlignedAddr, align 4
+; CHECK: br label %atomicrmw.start
+; CHECK:atomicrmw.start:
+; CHECK: %loaded = phi i32 [ %5, %entry ], [ %newloaded, %atomicrmw.start ]
+; CHECK: %new = add i32 %loaded, %ValOperand_Shifted
+; CHECK: %6 = and i32 %new, %Mask
+; CHECK: %7 = and i32 %loaded, %Inv_Mask
+; CHECK: %8 = or i32 %7, %6
+; CHECK: %9 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %8 monotonic monotonic
+; CHECK: %success = extractvalue { i32, i1 } %9, 1
+; CHECK: %newloaded = extractvalue { i32, i1 } %9, 0
+; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
+; CHECK:atomicrmw.end:
+; CHECK: %10 = lshr i32 %newloaded, %ShiftAmt
+; CHECK: %11 = trunc i32 %10 to i16
+; CHECK: fence seq_cst
+; CHECK: ret i16 %11
+define i16 @test_add_i16(i16* %arg, i16 %val) {
+entry:
+ %ret = atomicrmw add i16* %arg, i16 %val seq_cst
+ ret i16 %ret
+}
+
+; CHECK-LABEL: @test_xor_i16(
+; (I'm going to just assert on the bits that differ from add, above.)
+; CHECK:atomicrmw.start:
+; CHECK: %new = xor i32 %loaded, %ValOperand_Shifted
+; CHECK: %6 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %new monotonic monotonic
+; CHECK:atomicrmw.end:
+define i16 @test_xor_i16(i16* %arg, i16 %val) {
+entry:
+ %ret = atomicrmw xor i16* %arg, i16 %val seq_cst
+ ret i16 %ret
+}
+
+; CHECK-LABEL: @test_min_i16(
+; CHECK:atomicrmw.start:
+; CHECK: %6 = lshr i32 %loaded, %ShiftAmt
+; CHECK: %7 = trunc i32 %6 to i16
+; CHECK: %8 = icmp sle i16 %7, %val
+; CHECK: %new = select i1 %8, i16 %7, i16 %val
+; CHECK: %9 = zext i16 %new to i32
+; CHECK: %10 = shl i32 %9, %ShiftAmt
+; CHECK: %11 = and i32 %loaded, %Inv_Mask
+; CHECK: %12 = or i32 %11, %10
+; CHECK: %13 = cmpxchg i32* %AlignedAddr, i32 %loaded, i32 %12 monotonic monotonic
+; CHECK:atomicrmw.end:
+define i16 @test_min_i16(i16* %arg, i16 %val) {
+entry:
+ %ret = atomicrmw min i16* %arg, i16 %val seq_cst
+ ret i16 %ret
+}
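Aside, not part of the patch: the partword.cmpxchg.* loop that the FileCheck lines above match is the usual mask-and-shift emulation of a byte-sized compare-and-swap on top of a word-sized one. A minimal C sketch of the same loop, assuming big-endian layout as on SPARC and the GCC/Clang __atomic builtins; the helper name is made up:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: emulate an i8 cmpxchg with a 32-bit CAS on the
 * containing aligned word, mirroring the partword.cmpxchg.* blocks that
 * -atomic-expand produces.  Assumes big-endian byte order, as on SPARC. */
bool cmpxchg_i8_via_word(uint8_t *p, uint8_t expected, uint8_t desired,
                         uint8_t *out_old) {
  uintptr_t addr = (uintptr_t)p;
  uint32_t *aligned = (uint32_t *)(addr & ~(uintptr_t)3);  /* %AlignedAddr */
  unsigned shift = (3 - (addr & 3)) * 8;                   /* %ShiftAmt    */
  uint32_t mask = 0xFFu << shift;                          /* %Mask        */
  uint32_t new_bits = (uint32_t)desired << shift;
  uint32_t old_bits = (uint32_t)expected << shift;

  /* The other three bytes of the word, with the target byte cleared. */
  uint32_t rest = __atomic_load_n(aligned, __ATOMIC_RELAXED) & ~mask;
  for (;;) {                                   /* partword.cmpxchg.loop */
    uint32_t cmp = rest | old_bits;            /* full word we expect   */
    uint32_t swp = rest | new_bits;            /* full word we want     */
    if (__atomic_compare_exchange_n(aligned, &cmp, swp, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
      *out_old = (uint8_t)(cmp >> shift);
      return true;
    }
    /* partword.cmpxchg.failure: cmp now holds the word actually observed. */
    uint32_t seen_rest = cmp & ~mask;
    if (seen_rest == rest) {                   /* target byte mismatched */
      *out_old = (uint8_t)(cmp >> shift);
      return false;
    }
    rest = seen_rest;                          /* neighbouring bytes changed: retry */
  }
}

The retry-versus-fail decision mirrors the partword.cmpxchg.failure block above: if only the neighbouring bytes of the word changed, the loop retries with the freshly observed word; if the target byte itself differs, the operation reports failure.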