Diffstat (limited to 'clang')
-rw-r--r-- | clang/lib/CodeGen/CGBuiltin.cpp            | 105
-rw-r--r-- | clang/test/CodeGen/2008-03-05-syncPtr.c    |  19
-rw-r--r-- | clang/test/CodeGen/2010-01-13-MemBarrier.c |   8
-rw-r--r-- | clang/test/CodeGen/Atomics-no64bit.c       | 172
-rw-r--r-- | clang/test/CodeGen/Atomics.c               | 352
-rw-r--r-- | clang/test/CodeGen/atomic.c                | 109
6 files changed, 261 insertions, 504 deletions
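
For orientation before the diff itself: this commit switches codegen for the __sync_* builtins from the old barrier-wrapped llvm.atomic.* intrinsics to the first-class atomicrmw, cmpxchg, and fence instructions, all emitted with seq_cst ordering. A minimal before/after sketch for one builtin follows (illustrative only; the function name is made up, but the IR lines follow the CHECK patterns updated in the tests below):

/* sketch.c: what __sync_fetch_and_add lowers to, before and after. */
int fetch_add(int *p) {
  return __sync_fetch_and_add(p, 1);
}
/* Old lowering: an intrinsic call bracketed by explicit barriers.
 *   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
 *   %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1)
 *   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
 * New lowering: a single instruction that carries its own memory ordering.
 *   %0 = atomicrmw add i32* %p, i32 1 seq_cst
 */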
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 70652d8cd07..642bf15e1b4 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -27,20 +27,6 @@ using namespace clang;
 using namespace CodeGen;
 using namespace llvm;
 
-static void EmitMemoryBarrier(CodeGenFunction &CGF,
-                              bool LoadLoad, bool LoadStore,
-                              bool StoreLoad, bool StoreStore,
-                              bool Device) {
-  Value *True = CGF.Builder.getTrue();
-  Value *False = CGF.Builder.getFalse();
-  Value *C[5] = { LoadLoad ? True : False,
-                  LoadStore ? True : False,
-                  StoreLoad ? True : False,
-                  StoreStore ? True : False,
-                  Device ? True : False };
-  CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
-}
-
 /// Emit the conversions required to turn the given value into an
 /// integer of the given size.
 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
@@ -65,25 +51,11 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
   return V;
 }
 
-// The atomic builtins are also full memory barriers. This is a utility for
-// wrapping a call to the builtins with memory barriers.
-static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
-                                  ArrayRef<Value *> Args) {
-  // FIXME: We need a target hook for whether this applies to device memory or
-  // not.
-  bool Device = true;
-
-  // Create barriers both before and after the call.
-  EmitMemoryBarrier(CGF, true, true, true, true, Device);
-  Value *Result = CGF.Builder.CreateCall(Fn, Args);
-  EmitMemoryBarrier(CGF, true, true, true, true, Device);
-  return Result;
-}
-
 /// Utility to insert an atomic instruction based on Intrinsic::ID
 /// and the expression node.
 static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
-                               Intrinsic::ID Id, const CallExpr *E) {
+                               llvm::AtomicRMWInst::BinOp Kind,
+                               const CallExpr *E) {
   QualType T = E->getType();
   assert(E->getArg(0)->getType()->isPointerType());
   assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -99,16 +71,15 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                                        CGF.getContext().getTypeSize(T));
   llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
 
-  llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
-  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
   llvm::Value *Args[2];
   Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
   Args[1] = CGF.EmitScalarExpr(E->getArg(1));
   llvm::Type *ValueType = Args[1]->getType();
   Args[1] = EmitToInt(CGF, Args[1], T, IntType);
 
-  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+  llvm::Value *Result =
+      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+                                  llvm::SequentiallyConsistent);
   Result = EmitFromInt(CGF, Result, T, ValueType);
   return RValue::get(Result);
 }
@@ -117,7 +88,8 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
 /// the expression node, where the return value is the result of the
 /// operation.
 static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
-                                   Intrinsic::ID Id, const CallExpr *E,
+                                   llvm::AtomicRMWInst::BinOp Kind,
+                                   const CallExpr *E,
                                    Instruction::BinaryOps Op) {
   QualType T = E->getType();
   assert(E->getArg(0)->getType()->isPointerType());
@@ -134,16 +106,15 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                        CGF.getContext().getTypeSize(T));
   llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
 
-  llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
-  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
   llvm::Value *Args[2];
   Args[1] = CGF.EmitScalarExpr(E->getArg(1));
   llvm::Type *ValueType = Args[1]->getType();
   Args[1] = EmitToInt(CGF, Args[1], T, IntType);
   Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
 
-  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+  llvm::Value *Result =
+      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+                                  llvm::SequentiallyConsistent);
   Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
   Result = EmitFromInt(CGF, Result, T, ValueType);
   return RValue::get(Result);
@@ -780,76 +751,76 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__sync_fetch_and_add_1:
   case Builtin::BI__sync_fetch_and_add_2:
   case Builtin::BI__sync_fetch_and_add_4:
   case Builtin::BI__sync_fetch_and_add_8:
   case Builtin::BI__sync_fetch_and_add_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
   case Builtin::BI__sync_fetch_and_sub_1:
   case Builtin::BI__sync_fetch_and_sub_2:
   case Builtin::BI__sync_fetch_and_sub_4:
   case Builtin::BI__sync_fetch_and_sub_8:
   case Builtin::BI__sync_fetch_and_sub_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
   case Builtin::BI__sync_fetch_and_or_1:
   case Builtin::BI__sync_fetch_and_or_2:
   case Builtin::BI__sync_fetch_and_or_4:
   case Builtin::BI__sync_fetch_and_or_8:
   case Builtin::BI__sync_fetch_and_or_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
   case Builtin::BI__sync_fetch_and_and_1:
   case Builtin::BI__sync_fetch_and_and_2:
   case Builtin::BI__sync_fetch_and_and_4:
   case Builtin::BI__sync_fetch_and_and_8:
   case Builtin::BI__sync_fetch_and_and_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
   case Builtin::BI__sync_fetch_and_xor_1:
   case Builtin::BI__sync_fetch_and_xor_2:
   case Builtin::BI__sync_fetch_and_xor_4:
   case Builtin::BI__sync_fetch_and_xor_8:
   case Builtin::BI__sync_fetch_and_xor_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
 
   // Clang extensions: not overloaded yet.
   case Builtin::BI__sync_fetch_and_min:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
   case Builtin::BI__sync_fetch_and_max:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
   case Builtin::BI__sync_fetch_and_umin:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
   case Builtin::BI__sync_fetch_and_umax:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
 
   case Builtin::BI__sync_add_and_fetch_1:
   case Builtin::BI__sync_add_and_fetch_2:
   case Builtin::BI__sync_add_and_fetch_4:
   case Builtin::BI__sync_add_and_fetch_8:
   case Builtin::BI__sync_add_and_fetch_16:
-    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                 llvm::Instruction::Add);
   case Builtin::BI__sync_sub_and_fetch_1:
   case Builtin::BI__sync_sub_and_fetch_2:
   case Builtin::BI__sync_sub_and_fetch_4:
   case Builtin::BI__sync_sub_and_fetch_8:
   case Builtin::BI__sync_sub_and_fetch_16:
-    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                 llvm::Instruction::Sub);
   case Builtin::BI__sync_and_and_fetch_1:
   case Builtin::BI__sync_and_and_fetch_2:
   case Builtin::BI__sync_and_and_fetch_4:
   case Builtin::BI__sync_and_and_fetch_8:
   case Builtin::BI__sync_and_and_fetch_16:
-    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                 llvm::Instruction::And);
   case Builtin::BI__sync_or_and_fetch_1:
   case Builtin::BI__sync_or_and_fetch_2:
   case Builtin::BI__sync_or_and_fetch_4:
   case Builtin::BI__sync_or_and_fetch_8:
   case Builtin::BI__sync_or_and_fetch_16:
-    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                 llvm::Instruction::Or);
   case Builtin::BI__sync_xor_and_fetch_1:
   case Builtin::BI__sync_xor_and_fetch_2:
   case Builtin::BI__sync_xor_and_fetch_4:
   case Builtin::BI__sync_xor_and_fetch_8:
   case Builtin::BI__sync_xor_and_fetch_16:
-    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                 llvm::Instruction::Xor);
 
   case Builtin::BI__sync_val_compare_and_swap_1:
@@ -866,9 +837,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(T));
     llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
 
-    llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
-    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
-                                    IntrinsicTypes);
 
     Value *Args[3];
     Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -877,7 +845,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Args[1] = EmitToInt(*this, Args[1], T, IntType);
     Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
 
-    Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
+    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+                                                llvm::SequentiallyConsistent);
     Result = EmitFromInt(*this, Result, T, ValueType);
     return RValue::get(Result);
   }
@@ -896,9 +865,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(T));
     llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
 
-    llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
-    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
-                                    IntrinsicTypes);
 
     Value *Args[3];
     Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -906,7 +872,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
     Value *OldVal = Args[1];
-    Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
+    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+                                                 llvm::SequentiallyConsistent);
     Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
     // zext bool to int.
     Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
@@ -918,14 +885,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__sync_swap_4:
   case Builtin::BI__sync_swap_8:
   case Builtin::BI__sync_swap_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
 
   case Builtin::BI__sync_lock_test_and_set_1:
   case Builtin::BI__sync_lock_test_and_set_2:
   case Builtin::BI__sync_lock_test_and_set_4:
   case Builtin::BI__sync_lock_test_and_set_8:
   case Builtin::BI__sync_lock_test_and_set_16:
-    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
 
   case Builtin::BI__sync_lock_release_1:
   case Builtin::BI__sync_lock_release_2:
@@ -937,13 +904,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
       cast<llvm::PointerType>(Ptr->getType())->getElementType();
     llvm::StoreInst *Store =
       Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
+    // FIXME: This is completely, utterly wrong; it only even sort-of works
+    // on x86.
     Store->setVolatile(true);
     return RValue::get(0);
   }
 
   case Builtin::BI__sync_synchronize: {
-    // We assume like gcc appears to, that this only applies to cached memory.
-    EmitMemoryBarrier(*this, true, true, true, true, false);
+    // We assume this is supposed to correspond to a C++0x-style
+    // sequentially-consistent fence (i.e. this is only usable for
+    // synchronization, not device I/O or anything like that). This intrinsic
+    // is really badly designed in the sense that in theory, there isn't
+    // any way to safely use it... but in practice, it mostly works
+    // to use it with non-atomic loads and stores to get acquire/release
+    // semantics.
+    Builder.CreateFence(llvm::SequentiallyConsistent);
     return RValue::get(0);
   }
diff --git a/clang/test/CodeGen/2008-03-05-syncPtr.c b/clang/test/CodeGen/2008-03-05-syncPtr.c
index 3cabcfef19d..784295ce689 100644
--- a/clang/test/CodeGen/2008-03-05-syncPtr.c
+++ b/clang/test/CodeGen/2008-03-05-syncPtr.c
@@ -1,27 +1,40 @@
-// RUN: %clang_cc1 %s -emit-llvm -o - | grep llvm.atomic
-// XFAIL: sparc-sun-solaris2|arm
-// Feature currently implemented only for x86, alpha, powerpc.
+// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
 
 int* foo(int** a, int* b, int* c) {
   return __sync_val_compare_and_swap (a, b, c);
 }
+// CHECK: define i32* @foo
+// CHECK: cmpxchg
 
 int foo2(int** a, int* b, int* c) {
   return __sync_bool_compare_and_swap (a, b, c);
 }
+// CHECK: define i32 @foo2
+// CHECK: cmpxchg
 
 int* foo3(int** a, int b) {
   return __sync_fetch_and_add (a, b);
 }
+// CHECK: define i32* @foo3
+// CHECK: atomicrmw add
+
 int* foo4(int** a, int b) {
   return __sync_fetch_and_sub (a, b);
 }
+// CHECK: define i32* @foo4
+// CHECK: atomicrmw sub
+
 int* foo5(int** a, int* b) {
   return __sync_lock_test_and_set (a, b);
 }
+// CHECK: define i32* @foo5
+// CHECK: atomicrmw xchg
+
 int* foo6(int** a, int*** b) {
   return __sync_lock_test_and_set (a, b);
 }
+// CHECK: define i32* @foo6
+// CHECK: atomicrmw xchg
diff --git a/clang/test/CodeGen/2010-01-13-MemBarrier.c b/clang/test/CodeGen/2010-01-13-MemBarrier.c
index 32ad97ca469..c2b0acdab3c 100644
--- a/clang/test/CodeGen/2010-01-13-MemBarrier.c
+++ b/clang/test/CodeGen/2010-01-13-MemBarrier.c
@@ -2,10 +2,10 @@
 // XFAIL: sparc
 // rdar://7536390
 
-unsigned t(unsigned *ptr, unsigned val) {
+typedef unsigned __INT32_TYPE__ uint32_t;
+
+unsigned t(uint32_t *ptr, uint32_t val) {
   // CHECK: @t
-  // CHECK: call void @llvm.memory.barrier
-  // CHECK-NEXT: call i32 @llvm.atomic.swap.i32
-  // CHECK-NEXT: call void @llvm.memory.barrier
+  // CHECK: atomicrmw xchg i32* {{.*}} seq_cst
   return __sync_lock_test_and_set(ptr, val);
 }
diff --git a/clang/test/CodeGen/Atomics-no64bit.c b/clang/test/CodeGen/Atomics-no64bit.c
deleted file mode 100644
index b57b27a4d59..00000000000
--- a/clang/test/CodeGen/Atomics-no64bit.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// Test frontend handling of __sync builtins.
-// Modified from a gcc testcase.
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep atomic | count 129
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i8 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i16 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i32 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep volatile | count 6
-
-// Currently this is implemented only for Alpha, X86, PowerPC.
-// Add your target here if it doesn't work.
-// This version of the test does not include long long.
-// XFAIL: sparc
-
-signed char sc;
-unsigned char uc;
-signed short ss;
-unsigned short us;
-signed int si;
-unsigned int ui;
-
-void test_op_ignore (void)
-{
-  (void) __sync_fetch_and_add (&sc, 1);
-  (void) __sync_fetch_and_add (&uc, 1);
-  (void) __sync_fetch_and_add (&ss, 1);
-  (void) __sync_fetch_and_add (&us, 1);
-  (void) __sync_fetch_and_add (&si, 1);
-  (void) __sync_fetch_and_add (&ui, 1);
-
-  (void) __sync_fetch_and_sub (&sc, 1);
-  (void) __sync_fetch_and_sub (&uc, 1);
-  (void) __sync_fetch_and_sub (&ss, 1);
-  (void) __sync_fetch_and_sub (&us, 1);
-  (void) __sync_fetch_and_sub (&si, 1);
-  (void) __sync_fetch_and_sub (&ui, 1);
-
-  (void) __sync_fetch_and_or (&sc, 1);
-  (void) __sync_fetch_and_or (&uc, 1);
-  (void) __sync_fetch_and_or (&ss, 1);
-  (void) __sync_fetch_and_or (&us, 1);
-  (void) __sync_fetch_and_or (&si, 1);
-  (void) __sync_fetch_and_or (&ui, 1);
-
-  (void) __sync_fetch_and_xor (&sc, 1);
-  (void) __sync_fetch_and_xor (&uc, 1);
-  (void) __sync_fetch_and_xor (&ss, 1);
-  (void) __sync_fetch_and_xor (&us, 1);
-  (void) __sync_fetch_and_xor (&si, 1);
-  (void) __sync_fetch_and_xor (&ui, 1);
-
-  (void) __sync_fetch_and_and (&sc, 1);
-  (void) __sync_fetch_and_and (&uc, 1);
-  (void) __sync_fetch_and_and (&ss, 1);
-  (void) __sync_fetch_and_and (&us, 1);
-  (void) __sync_fetch_and_and (&si, 1);
-  (void) __sync_fetch_and_and (&ui, 1);
-
-}
-
-void test_fetch_and_op (void)
-{
-  sc = __sync_fetch_and_add (&sc, 11);
-  uc = __sync_fetch_and_add (&uc, 11);
-  ss = __sync_fetch_and_add (&ss, 11);
-  us = __sync_fetch_and_add (&us, 11);
-  si = __sync_fetch_and_add (&si, 11);
-  ui = __sync_fetch_and_add (&ui, 11);
-
-  sc = __sync_fetch_and_sub (&sc, 11);
-  uc = __sync_fetch_and_sub (&uc, 11);
-  ss = __sync_fetch_and_sub (&ss, 11);
-  us = __sync_fetch_and_sub (&us, 11);
-  si = __sync_fetch_and_sub (&si, 11);
-  ui = __sync_fetch_and_sub (&ui, 11);
-
-  sc = __sync_fetch_and_or (&sc, 11);
-  uc = __sync_fetch_and_or (&uc, 11);
-  ss = __sync_fetch_and_or (&ss, 11);
-  us = __sync_fetch_and_or (&us, 11);
-  si = __sync_fetch_and_or (&si, 11);
-  ui = __sync_fetch_and_or (&ui, 11);
-
-  sc = __sync_fetch_and_xor (&sc, 11);
-  uc = __sync_fetch_and_xor (&uc, 11);
-  ss = __sync_fetch_and_xor (&ss, 11);
-  us = __sync_fetch_and_xor (&us, 11);
-  si = __sync_fetch_and_xor (&si, 11);
-  ui = __sync_fetch_and_xor (&ui, 11);
-
-  sc = __sync_fetch_and_and (&sc, 11);
-  uc = __sync_fetch_and_and (&uc, 11);
-  ss = __sync_fetch_and_and (&ss, 11);
-  us = __sync_fetch_and_and (&us, 11);
-  si = __sync_fetch_and_and (&si, 11);
-  ui = __sync_fetch_and_and (&ui, 11);
-
-}
-
-void test_op_and_fetch (void)
-{
-  sc = __sync_add_and_fetch (&sc, uc);
-  uc = __sync_add_and_fetch (&uc, uc);
-  ss = __sync_add_and_fetch (&ss, uc);
-  us = __sync_add_and_fetch (&us, uc);
-  si = __sync_add_and_fetch (&si, uc);
-  ui = __sync_add_and_fetch (&ui, uc);
-
-  sc = __sync_sub_and_fetch (&sc, uc);
-  uc = __sync_sub_and_fetch (&uc, uc);
-  ss = __sync_sub_and_fetch (&ss, uc);
-  us = __sync_sub_and_fetch (&us, uc);
-  si = __sync_sub_and_fetch (&si, uc);
-  ui = __sync_sub_and_fetch (&ui, uc);
-
-  sc = __sync_or_and_fetch (&sc, uc);
-  uc = __sync_or_and_fetch (&uc, uc);
-  ss = __sync_or_and_fetch (&ss, uc);
-  us = __sync_or_and_fetch (&us, uc);
-  si = __sync_or_and_fetch (&si, uc);
-  ui = __sync_or_and_fetch (&ui, uc);
-
-  sc = __sync_xor_and_fetch (&sc, uc);
-  uc = __sync_xor_and_fetch (&uc, uc);
-  ss = __sync_xor_and_fetch (&ss, uc);
-  us = __sync_xor_and_fetch (&us, uc);
-  si = __sync_xor_and_fetch (&si, uc);
-  ui = __sync_xor_and_fetch (&ui, uc);
-
-  sc = __sync_and_and_fetch (&sc, uc);
-  uc = __sync_and_and_fetch (&uc, uc);
-  ss = __sync_and_and_fetch (&ss, uc);
-  us = __sync_and_and_fetch (&us, uc);
-  si = __sync_and_and_fetch (&si, uc);
-  ui = __sync_and_and_fetch (&ui, uc);
-
-}
-
-void test_compare_and_swap (void)
-{
-  sc = __sync_val_compare_and_swap (&sc, uc, sc);
-  uc = __sync_val_compare_and_swap (&uc, uc, sc);
-  ss = __sync_val_compare_and_swap (&ss, uc, sc);
-  us = __sync_val_compare_and_swap (&us, uc, sc);
-  si = __sync_val_compare_and_swap (&si, uc, sc);
-  ui = __sync_val_compare_and_swap (&ui, uc, sc);
-
-  ui = __sync_bool_compare_and_swap (&sc, uc, sc);
-  ui = __sync_bool_compare_and_swap (&uc, uc, sc);
-  ui = __sync_bool_compare_and_swap (&ss, uc, sc);
-  ui = __sync_bool_compare_and_swap (&us, uc, sc);
-  ui = __sync_bool_compare_and_swap (&si, uc, sc);
-  ui = __sync_bool_compare_and_swap (&ui, uc, sc);
-}
-
-void test_lock (void)
-{
-  sc = __sync_lock_test_and_set (&sc, 1);
-  uc = __sync_lock_test_and_set (&uc, 1);
-  ss = __sync_lock_test_and_set (&ss, 1);
-  us = __sync_lock_test_and_set (&us, 1);
-  si = __sync_lock_test_and_set (&si, 1);
-  ui = __sync_lock_test_and_set (&ui, 1);
-
-  __sync_synchronize ();
-
-  __sync_lock_release (&sc);
-  __sync_lock_release (&uc);
-  __sync_lock_release (&ss);
-  __sync_lock_release (&us);
-  __sync_lock_release (&si);
-  __sync_lock_release (&ui);
-}
diff --git a/clang/test/CodeGen/Atomics.c b/clang/test/CodeGen/Atomics.c
index 36a82ae4490..e5a5812f462 100644
--- a/clang/test/CodeGen/Atomics.c
+++ b/clang/test/CodeGen/Atomics.c
@@ -1,15 +1,6 @@
 // Test frontend handling of __sync builtins.
 // Modified from a gcc testcase.
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep atomic | count 172
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i8 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i16 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep p0i32 | count 43
-// RUN: %clang_cc1 -emit-llvm %s -o - | grep volatile | count 8
-
-// Currently this is implemented only for Alpha, X86, PowerPC.
-// Add your target here if it doesn't work.
-// PPC32 does not translate the long long variants, so fails this test.
-// XFAIL: sparc,powerpc
+// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
 
 signed char sc;
 unsigned char uc;
@@ -20,193 +11,194 @@ unsigned int ui;
 signed long long sll;
 unsigned long long ull;
 
-void test_op_ignore (void)
+void test_op_ignore (void) // CHECK: define void @test_op_ignore
 {
-  (void) __sync_fetch_and_add (&sc, 1);
-  (void) __sync_fetch_and_add (&uc, 1);
-  (void) __sync_fetch_and_add (&ss, 1);
-  (void) __sync_fetch_and_add (&us, 1);
-  (void) __sync_fetch_and_add (&si, 1);
-  (void) __sync_fetch_and_add (&ui, 1);
-  (void) __sync_fetch_and_add (&sll, 1);
-  (void) __sync_fetch_and_add (&ull, 1);
-
-  (void) __sync_fetch_and_sub (&sc, 1);
-  (void) __sync_fetch_and_sub (&uc, 1);
-  (void) __sync_fetch_and_sub (&ss, 1);
-  (void) __sync_fetch_and_sub (&us, 1);
-  (void) __sync_fetch_and_sub (&si, 1);
-  (void) __sync_fetch_and_sub (&ui, 1);
-  (void) __sync_fetch_and_sub (&sll, 1);
-  (void) __sync_fetch_and_sub (&ull, 1);
-
-  (void) __sync_fetch_and_or (&sc, 1);
-  (void) __sync_fetch_and_or (&uc, 1);
-  (void) __sync_fetch_and_or (&ss, 1);
-  (void) __sync_fetch_and_or (&us, 1);
-  (void) __sync_fetch_and_or (&si, 1);
-  (void) __sync_fetch_and_or (&ui, 1);
-  (void) __sync_fetch_and_or (&sll, 1);
-  (void) __sync_fetch_and_or (&ull, 1);
-
-  (void) __sync_fetch_and_xor (&sc, 1);
-  (void) __sync_fetch_and_xor (&uc, 1);
-  (void) __sync_fetch_and_xor (&ss, 1);
-  (void) __sync_fetch_and_xor (&us, 1);
-  (void) __sync_fetch_and_xor (&si, 1);
-  (void) __sync_fetch_and_xor (&ui, 1);
-  (void) __sync_fetch_and_xor (&sll, 1);
-  (void) __sync_fetch_and_xor (&ull, 1);
-
-  (void) __sync_fetch_and_and (&sc, 1);
-  (void) __sync_fetch_and_and (&uc, 1);
-  (void) __sync_fetch_and_and (&ss, 1);
-  (void) __sync_fetch_and_and (&us, 1);
-  (void) __sync_fetch_and_and (&si, 1);
-  (void) __sync_fetch_and_and (&ui, 1);
-  (void) __sync_fetch_and_and (&sll, 1);
-  (void) __sync_fetch_and_and (&ull, 1);
+  (void) __sync_fetch_and_add (&sc, 1); // CHECK: atomicrmw add i8
+  (void) __sync_fetch_and_add (&uc, 1); // CHECK: atomicrmw add i8
+  (void) __sync_fetch_and_add (&ss, 1); // CHECK: atomicrmw add i16
+  (void) __sync_fetch_and_add (&us, 1); // CHECK: atomicrmw add i16
+  (void) __sync_fetch_and_add (&si, 1); // CHECK: atomicrmw add i32
+  (void) __sync_fetch_and_add (&ui, 1); // CHECK: atomicrmw add i32
+  (void) __sync_fetch_and_add (&sll, 1); // CHECK: atomicrmw add i64
+  (void) __sync_fetch_and_add (&ull, 1); // CHECK: atomicrmw add i64
+
+  (void) __sync_fetch_and_sub (&sc, 1); // CHECK: atomicrmw sub i8
+  (void) __sync_fetch_and_sub (&uc, 1); // CHECK: atomicrmw sub i8
+  (void) __sync_fetch_and_sub (&ss, 1); // CHECK: atomicrmw sub i16
+  (void) __sync_fetch_and_sub (&us, 1); // CHECK: atomicrmw sub i16
+  (void) __sync_fetch_and_sub (&si, 1); // CHECK: atomicrmw sub i32
+  (void) __sync_fetch_and_sub (&ui, 1); // CHECK: atomicrmw sub i32
+  (void) __sync_fetch_and_sub (&sll, 1); // CHECK: atomicrmw sub i64
+  (void) __sync_fetch_and_sub (&ull, 1); // CHECK: atomicrmw sub i64
+
+  (void) __sync_fetch_and_or (&sc, 1); // CHECK: atomicrmw or i8
+  (void) __sync_fetch_and_or (&uc, 1); // CHECK: atomicrmw or i8
+  (void) __sync_fetch_and_or (&ss, 1); // CHECK: atomicrmw or i16
+  (void) __sync_fetch_and_or (&us, 1); // CHECK: atomicrmw or i16
+  (void) __sync_fetch_and_or (&si, 1); // CHECK: atomicrmw or i32
+  (void) __sync_fetch_and_or (&ui, 1); // CHECK: atomicrmw or i32
+  (void) __sync_fetch_and_or (&sll, 1); // CHECK: atomicrmw or i64
+  (void) __sync_fetch_and_or (&ull, 1); // CHECK: atomicrmw or i64
+
+  (void) __sync_fetch_and_xor (&sc, 1); // CHECK: atomicrmw xor i8
+  (void) __sync_fetch_and_xor (&uc, 1); // CHECK: atomicrmw xor i8
+  (void) __sync_fetch_and_xor (&ss, 1); // CHECK: atomicrmw xor i16
+  (void) __sync_fetch_and_xor (&us, 1); // CHECK: atomicrmw xor i16
+  (void) __sync_fetch_and_xor (&si, 1); // CHECK: atomicrmw xor i32
+  (void) __sync_fetch_and_xor (&ui, 1); // CHECK: atomicrmw xor i32
+  (void) __sync_fetch_and_xor (&sll, 1); // CHECK: atomicrmw xor i64
+  (void) __sync_fetch_and_xor (&ull, 1); // CHECK: atomicrmw xor i64
+
+  (void) __sync_fetch_and_and (&sc, 1); // CHECK: atomicrmw and i8
+  (void) __sync_fetch_and_and (&uc, 1); // CHECK: atomicrmw and i8
+  (void) __sync_fetch_and_and (&ss, 1); // CHECK: atomicrmw and i16
+  (void) __sync_fetch_and_and (&us, 1); // CHECK: atomicrmw and i16
+  (void) __sync_fetch_and_and (&si, 1); // CHECK: atomicrmw and i32
+  (void) __sync_fetch_and_and (&ui, 1); // CHECK: atomicrmw and i32
+  (void) __sync_fetch_and_and (&sll, 1); // CHECK: atomicrmw and i64
+  (void) __sync_fetch_and_and (&ull, 1); // CHECK: atomicrmw and i64
 }
 
-void test_fetch_and_op (void)
+void test_fetch_and_op (void) // CHECK: define void @test_fetch_and_op
 {
-  sc = __sync_fetch_and_add (&sc, 11);
-  uc = __sync_fetch_and_add (&uc, 11);
-  ss = __sync_fetch_and_add (&ss, 11);
-  us = __sync_fetch_and_add (&us, 11);
-  si = __sync_fetch_and_add (&si, 11);
-  ui = __sync_fetch_and_add (&ui, 11);
-  sll = __sync_fetch_and_add (&sll, 11);
-  ull = __sync_fetch_and_add (&ull, 11);
-
-  sc = __sync_fetch_and_sub (&sc, 11);
-  uc = __sync_fetch_and_sub (&uc, 11);
-  ss = __sync_fetch_and_sub (&ss, 11);
-  us = __sync_fetch_and_sub (&us, 11);
-  si = __sync_fetch_and_sub (&si, 11);
-  ui = __sync_fetch_and_sub (&ui, 11);
-  sll = __sync_fetch_and_sub (&sll, 11);
-  ull = __sync_fetch_and_sub (&ull, 11);
-
-  sc = __sync_fetch_and_or (&sc, 11);
-  uc = __sync_fetch_and_or (&uc, 11);
-  ss = __sync_fetch_and_or (&ss, 11);
-  us = __sync_fetch_and_or (&us, 11);
-  si = __sync_fetch_and_or (&si, 11);
-  ui = __sync_fetch_and_or (&ui, 11);
-  sll = __sync_fetch_and_or (&sll, 11);
-  ull = __sync_fetch_and_or (&ull, 11);
-
-  sc = __sync_fetch_and_xor (&sc, 11);
-  uc = __sync_fetch_and_xor (&uc, 11);
-  ss = __sync_fetch_and_xor (&ss, 11);
-  us = __sync_fetch_and_xor (&us, 11);
-  si = __sync_fetch_and_xor (&si, 11);
-  ui = __sync_fetch_and_xor (&ui, 11);
-  sll = __sync_fetch_and_xor (&sll, 11);
-  ull = __sync_fetch_and_xor (&ull, 11);
-
-  sc = __sync_fetch_and_and (&sc, 11);
-  uc = __sync_fetch_and_and (&uc, 11);
-  ss = __sync_fetch_and_and (&ss, 11);
-  us = __sync_fetch_and_and (&us, 11);
-  si = __sync_fetch_and_and (&si, 11);
-  ui = __sync_fetch_and_and (&ui, 11);
-  sll = __sync_fetch_and_and (&sll, 11);
-  ull = __sync_fetch_and_and (&ull, 11);
+  sc = __sync_fetch_and_add (&sc, 11); // CHECK: atomicrmw add
+  uc = __sync_fetch_and_add (&uc, 11); // CHECK: atomicrmw add
+  ss = __sync_fetch_and_add (&ss, 11); // CHECK: atomicrmw add
+  us = __sync_fetch_and_add (&us, 11); // CHECK: atomicrmw add
+  si = __sync_fetch_and_add (&si, 11); // CHECK: atomicrmw add
+  ui = __sync_fetch_and_add (&ui, 11); // CHECK: atomicrmw add
+  sll = __sync_fetch_and_add (&sll, 11); // CHECK: atomicrmw add
+  ull = __sync_fetch_and_add (&ull, 11); // CHECK: atomicrmw add
+
+  sc = __sync_fetch_and_sub (&sc, 11); // CHECK: atomicrmw sub
+  uc = __sync_fetch_and_sub (&uc, 11); // CHECK: atomicrmw sub
+  ss = __sync_fetch_and_sub (&ss, 11); // CHECK: atomicrmw sub
+  us = __sync_fetch_and_sub (&us, 11); // CHECK: atomicrmw sub
+  si = __sync_fetch_and_sub (&si, 11); // CHECK: atomicrmw sub
+  ui = __sync_fetch_and_sub (&ui, 11); // CHECK: atomicrmw sub
+  sll = __sync_fetch_and_sub (&sll, 11); // CHECK: atomicrmw sub
+  ull = __sync_fetch_and_sub (&ull, 11); // CHECK: atomicrmw sub
+
+  sc = __sync_fetch_and_or (&sc, 11); // CHECK: atomicrmw or
+  uc = __sync_fetch_and_or (&uc, 11); // CHECK: atomicrmw or
+  ss = __sync_fetch_and_or (&ss, 11); // CHECK: atomicrmw or
+  us = __sync_fetch_and_or (&us, 11); // CHECK: atomicrmw or
+  si = __sync_fetch_and_or (&si, 11); // CHECK: atomicrmw or
+  ui = __sync_fetch_and_or (&ui, 11); // CHECK: atomicrmw or
+  sll = __sync_fetch_and_or (&sll, 11); // CHECK: atomicrmw or
+  ull = __sync_fetch_and_or (&ull, 11); // CHECK: atomicrmw or
+
+  sc = __sync_fetch_and_xor (&sc, 11); // CHECK: atomicrmw xor
+  uc = __sync_fetch_and_xor (&uc, 11); // CHECK: atomicrmw xor
+  ss = __sync_fetch_and_xor (&ss, 11); // CHECK: atomicrmw xor
+  us = __sync_fetch_and_xor (&us, 11); // CHECK: atomicrmw xor
+  si = __sync_fetch_and_xor (&si, 11); // CHECK: atomicrmw xor
+  ui = __sync_fetch_and_xor (&ui, 11); // CHECK: atomicrmw xor
+  sll = __sync_fetch_and_xor (&sll, 11); // CHECK: atomicrmw xor
+  ull = __sync_fetch_and_xor (&ull, 11); // CHECK: atomicrmw xor
+
+  sc = __sync_fetch_and_and (&sc, 11); // CHECK: atomicrmw and
+  uc = __sync_fetch_and_and (&uc, 11); // CHECK: atomicrmw and
+  ss = __sync_fetch_and_and (&ss, 11); // CHECK: atomicrmw and
+  us = __sync_fetch_and_and (&us, 11); // CHECK: atomicrmw and
+  si = __sync_fetch_and_and (&si, 11); // CHECK: atomicrmw and
+  ui = __sync_fetch_and_and (&ui, 11); // CHECK: atomicrmw and
+  sll = __sync_fetch_and_and (&sll, 11); // CHECK: atomicrmw and
+  ull = __sync_fetch_and_and (&ull, 11); // CHECK: atomicrmw and
 }
 
 void test_op_and_fetch (void)
 {
-  sc = __sync_add_and_fetch (&sc, uc);
-  uc = __sync_add_and_fetch (&uc, uc);
-  ss = __sync_add_and_fetch (&ss, uc);
-  us = __sync_add_and_fetch (&us, uc);
-  si = __sync_add_and_fetch (&si, uc);
-  ui = __sync_add_and_fetch (&ui, uc);
-  sll = __sync_add_and_fetch (&sll, uc);
-  ull = __sync_add_and_fetch (&ull, uc);
-
-  sc = __sync_sub_and_fetch (&sc, uc);
-  uc = __sync_sub_and_fetch (&uc, uc);
-  ss = __sync_sub_and_fetch (&ss, uc);
-  us = __sync_sub_and_fetch (&us, uc);
-  si = __sync_sub_and_fetch (&si, uc);
-  ui = __sync_sub_and_fetch (&ui, uc);
-  sll = __sync_sub_and_fetch (&sll, uc);
-  ull = __sync_sub_and_fetch (&ull, uc);
-
-  sc = __sync_or_and_fetch (&sc, uc);
-  uc = __sync_or_and_fetch (&uc, uc);
-  ss = __sync_or_and_fetch (&ss, uc);
-  us = __sync_or_and_fetch (&us, uc);
-  si = __sync_or_and_fetch (&si, uc);
-  ui = __sync_or_and_fetch (&ui, uc);
-  sll = __sync_or_and_fetch (&sll, uc);
-  ull = __sync_or_and_fetch (&ull, uc);
-
-  sc = __sync_xor_and_fetch (&sc, uc);
-  uc = __sync_xor_and_fetch (&uc, uc);
-  ss = __sync_xor_and_fetch (&ss, uc);
-  us = __sync_xor_and_fetch (&us, uc);
-  si = __sync_xor_and_fetch (&si, uc);
-  ui = __sync_xor_and_fetch (&ui, uc);
-  sll = __sync_xor_and_fetch (&sll, uc);
-  ull = __sync_xor_and_fetch (&ull, uc);
-
-  sc = __sync_and_and_fetch (&sc, uc);
-  uc = __sync_and_and_fetch (&uc, uc);
-  ss = __sync_and_and_fetch (&ss, uc);
-  us = __sync_and_and_fetch (&us, uc);
-  si = __sync_and_and_fetch (&si, uc);
-  ui = __sync_and_and_fetch (&ui, uc);
-  sll = __sync_and_and_fetch (&sll, uc);
-  ull = __sync_and_and_fetch (&ull, uc);
+  sc = __sync_add_and_fetch (&sc, uc); // CHECK: atomicrmw add
+  uc = __sync_add_and_fetch (&uc, uc); // CHECK: atomicrmw add
+  ss = __sync_add_and_fetch (&ss, uc); // CHECK: atomicrmw add
+  us = __sync_add_and_fetch (&us, uc); // CHECK: atomicrmw add
+  si = __sync_add_and_fetch (&si, uc); // CHECK: atomicrmw add
+  ui = __sync_add_and_fetch (&ui, uc); // CHECK: atomicrmw add
+  sll = __sync_add_and_fetch (&sll, uc); // CHECK: atomicrmw add
+  ull = __sync_add_and_fetch (&ull, uc); // CHECK: atomicrmw add
+
+  sc = __sync_sub_and_fetch (&sc, uc); // CHECK: atomicrmw sub
+  uc = __sync_sub_and_fetch (&uc, uc); // CHECK: atomicrmw sub
+  ss = __sync_sub_and_fetch (&ss, uc); // CHECK: atomicrmw sub
+  us = __sync_sub_and_fetch (&us, uc); // CHECK: atomicrmw sub
+  si = __sync_sub_and_fetch (&si, uc); // CHECK: atomicrmw sub
+  ui = __sync_sub_and_fetch (&ui, uc); // CHECK: atomicrmw sub
+  sll = __sync_sub_and_fetch (&sll, uc); // CHECK: atomicrmw sub
+  ull = __sync_sub_and_fetch (&ull, uc); // CHECK: atomicrmw sub
+
+  sc = __sync_or_and_fetch (&sc, uc); // CHECK: atomicrmw or
+  uc = __sync_or_and_fetch (&uc, uc); // CHECK: atomicrmw or
+  ss = __sync_or_and_fetch (&ss, uc); // CHECK: atomicrmw or
+  us = __sync_or_and_fetch (&us, uc); // CHECK: atomicrmw or
+  si = __sync_or_and_fetch (&si, uc); // CHECK: atomicrmw or
+  ui = __sync_or_and_fetch (&ui, uc); // CHECK: atomicrmw or
+  sll = __sync_or_and_fetch (&sll, uc); // CHECK: atomicrmw or
+  ull = __sync_or_and_fetch (&ull, uc); // CHECK: atomicrmw or
+
+  sc = __sync_xor_and_fetch (&sc, uc); // CHECK: atomicrmw xor
+  uc = __sync_xor_and_fetch (&uc, uc); // CHECK: atomicrmw xor
+  ss = __sync_xor_and_fetch (&ss, uc); // CHECK: atomicrmw xor
+  us = __sync_xor_and_fetch (&us, uc); // CHECK: atomicrmw xor
+  si = __sync_xor_and_fetch (&si, uc); // CHECK: atomicrmw xor
+  ui = __sync_xor_and_fetch (&ui, uc); // CHECK: atomicrmw xor
+  sll = __sync_xor_and_fetch (&sll, uc); // CHECK: atomicrmw xor
+  ull = __sync_xor_and_fetch (&ull, uc); // CHECK: atomicrmw xor
+
+  sc = __sync_and_and_fetch (&sc, uc); // CHECK: atomicrmw and
+  uc = __sync_and_and_fetch (&uc, uc); // CHECK: atomicrmw and
+  ss = __sync_and_and_fetch (&ss, uc); // CHECK: atomicrmw and
+  us = __sync_and_and_fetch (&us, uc); // CHECK: atomicrmw and
+  si = __sync_and_and_fetch (&si, uc); // CHECK: atomicrmw and
+  ui = __sync_and_and_fetch (&ui, uc); // CHECK: atomicrmw and
+  sll = __sync_and_and_fetch (&sll, uc); // CHECK: atomicrmw and
+  ull = __sync_and_and_fetch (&ull, uc); // CHECK: atomicrmw and
 }
 
 void test_compare_and_swap (void)
 {
-  sc = __sync_val_compare_and_swap (&sc, uc, sc);
-  uc = __sync_val_compare_and_swap (&uc, uc, sc);
-  ss = __sync_val_compare_and_swap (&ss, uc, sc);
-  us = __sync_val_compare_and_swap (&us, uc, sc);
-  si = __sync_val_compare_and_swap (&si, uc, sc);
-  ui = __sync_val_compare_and_swap (&ui, uc, sc);
-  sll = __sync_val_compare_and_swap (&sll, uc, sc);
-  ull = __sync_val_compare_and_swap (&ull, uc, sc);
-
-  ui = __sync_bool_compare_and_swap (&sc, uc, sc);
-  ui = __sync_bool_compare_and_swap (&uc, uc, sc);
-  ui = __sync_bool_compare_and_swap (&ss, uc, sc);
-  ui = __sync_bool_compare_and_swap (&us, uc, sc);
-  ui = __sync_bool_compare_and_swap (&si, uc, sc);
-  ui = __sync_bool_compare_and_swap (&ui, uc, sc);
-  ui = __sync_bool_compare_and_swap (&sll, uc, sc);
-  ui = __sync_bool_compare_and_swap (&ull, uc, sc);
+  sc = __sync_val_compare_and_swap (&sc, uc, sc); // CHECK: cmpxchg i8
+  uc = __sync_val_compare_and_swap (&uc, uc, sc); // CHECK: cmpxchg i8
+  ss = __sync_val_compare_and_swap (&ss, uc, sc); // CHECK: cmpxchg i16
+  us = __sync_val_compare_and_swap (&us, uc, sc); // CHECK: cmpxchg i16
+  si = __sync_val_compare_and_swap (&si, uc, sc); // CHECK: cmpxchg i32
+  ui = __sync_val_compare_and_swap (&ui, uc, sc); // CHECK: cmpxchg i32
+  sll = __sync_val_compare_and_swap (&sll, uc, sc); // CHECK: cmpxchg i64
+  ull = __sync_val_compare_and_swap (&ull, uc, sc); // CHECK: cmpxchg i64
+
+  ui = __sync_bool_compare_and_swap (&sc, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&uc, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&ss, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&us, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&si, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&ui, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&sll, uc, sc); // CHECK: cmpxchg
+  ui = __sync_bool_compare_and_swap (&ull, uc, sc); // CHECK: cmpxchg
 }
 
 void test_lock (void)
 {
-  sc = __sync_lock_test_and_set (&sc, 1);
-  uc = __sync_lock_test_and_set (&uc, 1);
-  ss = __sync_lock_test_and_set (&ss, 1);
-  us = __sync_lock_test_and_set (&us, 1);
-  si = __sync_lock_test_and_set (&si, 1);
-  ui = __sync_lock_test_and_set (&ui, 1);
-  sll = __sync_lock_test_and_set (&sll, 1);
-  ull = __sync_lock_test_and_set (&ull, 1);
-
-  __sync_synchronize ();
-
-  __sync_lock_release (&sc);
-  __sync_lock_release (&uc);
-  __sync_lock_release (&ss);
-  __sync_lock_release (&us);
-  __sync_lock_release (&si);
-  __sync_lock_release (&ui);
-  __sync_lock_release (&sll);
-  __sync_lock_release (&ull);
+  sc = __sync_lock_test_and_set (&sc, 1); // CHECK: atomicrmw xchg i8
+  uc = __sync_lock_test_and_set (&uc, 1); // CHECK: atomicrmw xchg i8
+  ss = __sync_lock_test_and_set (&ss, 1); // CHECK: atomicrmw xchg i16
+  us = __sync_lock_test_and_set (&us, 1); // CHECK: atomicrmw xchg i16
+  si = __sync_lock_test_and_set (&si, 1); // CHECK: atomicrmw xchg i32
+  ui = __sync_lock_test_and_set (&ui, 1); // CHECK: atomicrmw xchg i32
+  sll = __sync_lock_test_and_set (&sll, 1); // CHECK: atomicrmw xchg i64
+  ull = __sync_lock_test_and_set (&ull, 1); // CHECK: atomicrmw xchg i64
+
+  __sync_synchronize (); // CHECK: fence seq_cst
+
+  // FIXME: These are wrong!
+  __sync_lock_release (&sc); // CHECK: store volatile
+  __sync_lock_release (&uc); // CHECK: store volatile
+  __sync_lock_release (&ss); // CHECK: store volatile
+  __sync_lock_release (&us); // CHECK: store volatile
+  __sync_lock_release (&si); // CHECK: store volatile
+  __sync_lock_release (&ui); // CHECK: store volatile
+  __sync_lock_release (&sll); // CHECK: store volatile
+  __sync_lock_release (&ull); // CHECK: store volatile
 }
diff --git a/clang/test/CodeGen/atomic.c b/clang/test/CodeGen/atomic.c
index 97e12ebe8cb..a0adac8e1c5 100644
--- a/clang/test/CodeGen/atomic.c
+++ b/clang/test/CodeGen/atomic.c
@@ -10,118 +10,76 @@ int atomic(void) {
   int cmp = 0;
 
   old = __sync_fetch_and_add(&val, 1);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.add.i32.p0i32(i32* %val, i32 1)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
 
   old = __sync_fetch_and_sub(&valc, 2);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %valc, i8 2)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw sub i8* %valc, i8 2 seq_cst
 
   old = __sync_fetch_and_min(&val, 3);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.min.i32.p0i32(i32* %val, i32 3)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw min i32* %val, i32 3 seq_cst
 
   old = __sync_fetch_and_max(&val, 4);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.max.i32.p0i32(i32* %val, i32 4)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw max i32* %val, i32 4 seq_cst
 
   old = __sync_fetch_and_umin(&uval, 5u);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.umin.i32.p0i32(i32* %uval, i32 5)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw umin i32* %uval, i32 5 seq_cst
 
   old = __sync_fetch_and_umax(&uval, 6u);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.umax.i32.p0i32(i32* %uval, i32 6)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw umax i32* %uval, i32 6 seq_cst
 
   old = __sync_lock_test_and_set(&val, 7);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 7)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw xchg i32* %val, i32 7 seq_cst
 
   old = __sync_swap(&val, 8);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 8)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw xchg i32* %val, i32 8 seq_cst
 
   old = __sync_val_compare_and_swap(&val, 4, 1976);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %val, i32 4, i32 1976)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
 
   old = __sync_bool_compare_and_swap(&val, 4, 1976);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %val, i32 4, i32 1976)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: cmpxchg i32* %val, i32 4, i32 1976 seq_cst
 
   old = __sync_fetch_and_and(&val, 0x9);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.and.i32.p0i32(i32* %val, i32 9)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw and i32* %val, i32 9 seq_cst
 
   old = __sync_fetch_and_or(&val, 0xa);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.or.i32.p0i32(i32* %val, i32 10)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw or i32* %val, i32 10 seq_cst
 
   old = __sync_fetch_and_xor(&val, 0xb);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %val, i32 11)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw xor i32* %val, i32 11 seq_cst
 
   old = __sync_add_and_fetch(&val, 1);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.add.i32.p0i32(i32* %val, i32 1)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
 
   old = __sync_sub_and_fetch(&val, 2);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %val, i32 2)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw sub i32* %val, i32 2 seq_cst
 
   old = __sync_and_and_fetch(&valc, 3);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i8 @llvm.atomic.load.and.i8.p0i8(i8* %valc, i8 3)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw and i8* %valc, i8 3 seq_cst
 
   old = __sync_or_and_fetch(&valc, 4);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i8 @llvm.atomic.load.or.i8.p0i8(i8* %valc, i8 4)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: atomicrmw or i8* %valc, i8 4 seq_cst
 
   old = __sync_xor_and_fetch(&valc, 5);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i8 @llvm.atomic.load.xor.i8.p0i8(i8* %valc, i8 5)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
+  // CHECK: atomicrmw xor i8* %valc, i8 5 seq_cst
   __sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* null, i32 0, i32 0)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
 
   if ( __sync_val_compare_and_swap(&valb, 0, 1)) {
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %valb, i8 0, i8 1)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: cmpxchg i8* %valb, i8 0, i8 1 seq_cst
     old = 42;
   }
 
   __sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* null, i32 0, i32 0)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
 
   __sync_lock_release(&val);
+  // FIXME: WRONG!
   // CHECK: store volatile i32 0, i32*
 
   __sync_synchronize ();
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
+  // CHECK: fence seq_cst
 
   return old;
 }
@@ -130,6 +88,7 @@ int atomic(void) {
 void release_return(int *lock) {
   // Ensure this is actually returning void all the way through.
   return __sync_lock_release(lock);
+  // FIXME: WRONG!
   // CHECK: store volatile i32 0, i32*
 }
 
@@ -138,21 +97,11 @@ void release_return(int *lock) {
 // CHECK: @addrspace
 void addrspace(int __attribute__((address_space(256))) * P) {
   __sync_bool_compare_and_swap(P, 0, 1);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)*{{.*}}, i32 0, i32 1)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
-
+  // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
+
   __sync_val_compare_and_swap(P, 0, 1);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)*{{.*}}, i32 0, i32 1)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
-
+  // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
+
   __sync_xor_and_fetch(P, 123);
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-  // CHECK: call i32 @llvm.atomic.load.xor.i32.p256i32(i32 addrspace(256)* {{.*}}, i32 123)
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
-
+  // CHECK: atomicrmw xor i32 addrspace(256)*{{.*}}, i32 123 seq_cst
 }
-
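
A closing note on the "FIXME: WRONG!" markers above. GCC documents __sync_lock_test_and_set as an acquire barrier and __sync_lock_release as a release barrier, so the canonical pairing is a spinlock like the sketch below (illustrative only, not part of this patch). After this commit the acquire side lowers to a seq_cst atomicrmw xchg, which is strong enough; the release side is still the plain volatile store the FIXMEs flag, which provides no release ordering on weakly-ordered targets.

/* spinlock.c: the usage pattern the __sync_lock_* FIXMEs are about. */
static int lock;

void enter_critical(void) {
  /* Acquire side: lowers to "atomicrmw xchg ... seq_cst" after this patch.
     Spin until the previous value was 0, i.e. until we took the lock. */
  while (__sync_lock_test_and_set(&lock, 1))
    ;
}

void leave_critical(void) {
  /* Release side: should be a store with at least release ordering, but is
     currently emitted as a plain volatile store of 0, per the FIXME. */
  __sync_lock_release(&lock);
}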