diff options
| author | Adam Nemet <anemet@apple.com> | 2016-04-14 08:47:17 +0000 |
|---|---|---|
| committer | Adam Nemet <anemet@apple.com> | 2016-04-14 08:47:17 +0000 |
| commit | 7aab6488310361f87e884d030aaa982ce825f957 (patch) | |
| tree | 6dd656ac898fc89c9bb65450dc5a689ba75a0a6f /llvm/test/Transforms/LoopVectorize/X86 | |
| parent | 8dcc8080ce2f3fed3135766af6b49b98e9b0b2d6 (diff) | |
| download | bcm5719-llvm-7aab6488310361f87e884d030aaa982ce825f957.tar.gz bcm5719-llvm-7aab6488310361f87e884d030aaa982ce825f957.zip | |
Revert "Support arbitrary addrspace pointers in masked load/store intrinsics"
This reverts commit r266086.
It breaks the LTO build of gcc in SPEC2000.
llvm-svn: 266282
Diffstat (limited to 'llvm/test/Transforms/LoopVectorize/X86')
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll | 115 |
1 file changed, 20 insertions(+), 95 deletions(-)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll index 1227344daff..268fb61dba3 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -18,16 +18,16 @@ target triple = "x86_64-pc_linux" ;AVX-LABEL: @foo1 ;AVX: icmp slt <8 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX: call <8 x i32> @llvm.masked.load.v8i32.p0v8i32 +;AVX: call <8 x i32> @llvm.masked.load.v8i32 ;AVX: add nsw <8 x i32> -;AVX: call void @llvm.masked.store.v8i32.p0v8i32 +;AVX: call void @llvm.masked.store.v8i32 ;AVX: ret void ;AVX512-LABEL: @foo1 ;AVX512: icmp slt <16 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX512: call <16 x i32> @llvm.masked.load.v16i32.p0v16i32 +;AVX512: call <16 x i32> @llvm.masked.load.v16i32 ;AVX512: add nsw <16 x i32> -;AVX512: call void @llvm.masked.store.v16i32.p0v16i32 +;AVX512: call void @llvm.masked.store.v16i32 ;AVX512: ret void ; Function Attrs: nounwind uwtable @@ -89,81 +89,6 @@ for.end: ; preds = %for.cond ret void } -; The same as @foo1 but all the pointers are address space 1 pointers. 
- -;AVX-LABEL: @foo1_addrspace1 -;AVX: icmp slt <8 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX: call <8 x i32> @llvm.masked.load.v8i32.p1v8i32 -;AVX: add nsw <8 x i32> -;AVX: call void @llvm.masked.store.v8i32.p1v8i32 -;AVX: ret void - -;AVX512-LABEL: @foo1_addrspace1 -;AVX512: icmp slt <16 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX512: call <16 x i32> @llvm.masked.load.v16i32.p1v16i32 -;AVX512: add nsw <16 x i32> -;AVX512: call void @llvm.masked.store.v16i32.p1v16i32 -;AVX512: ret void - -; Function Attrs: nounwind uwtable -define void @foo1_addrspace1(i32 addrspace(1)* %A, i32 addrspace(1)* %B, i32 addrspace(1)* %trigger) { -entry: - %A.addr = alloca i32 addrspace(1)*, align 8 - %B.addr = alloca i32 addrspace(1)*, align 8 - %trigger.addr = alloca i32 addrspace(1)*, align 8 - %i = alloca i32, align 4 - store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 8 - store i32 addrspace(1)* %B, i32 addrspace(1)** %B.addr, align 8 - store i32 addrspace(1)* %trigger, i32 addrspace(1)** %trigger.addr, align 8 - store i32 0, i32* %i, align 4 - br label %for.cond - -for.cond: ; preds = %for.inc, %entry - %0 = load i32, i32* %i, align 4 - %cmp = icmp slt i32 %0, 10000 - br i1 %cmp, label %for.body, label %for.end - -for.body: ; preds = %for.cond - %1 = load i32, i32* %i, align 4 - %idxprom = sext i32 %1 to i64 - %2 = load i32 addrspace(1)*, i32 addrspace(1)** %trigger.addr, align 8 - %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %2, i64 %idxprom - %3 = load i32, i32 addrspace(1)* %arrayidx, align 4 - %cmp1 = icmp slt i32 %3, 100 - br i1 %cmp1, label %if.then, label %if.end - -if.then: ; preds = %for.body - %4 = load i32, i32* %i, align 4 - %idxprom2 = sext i32 %4 to i64 - %5 = load i32 addrspace(1)*, i32 addrspace(1)** %B.addr, align 8 - %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %5, i64 %idxprom2 - %6 = load i32, i32 addrspace(1)* %arrayidx3, align 4 - %7 = load i32, i32* %i, align 4 - %idxprom4 = sext i32 %7 to i64 - %8 = 
load i32 addrspace(1)*, i32 addrspace(1)** %trigger.addr, align 8 - %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %8, i64 %idxprom4 - %9 = load i32, i32 addrspace(1)* %arrayidx5, align 4 - %add = add nsw i32 %6, %9 - %10 = load i32, i32* %i, align 4 - %idxprom6 = sext i32 %10 to i64 - %11 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 8 - %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %11, i64 %idxprom6 - store i32 %add, i32 addrspace(1)* %arrayidx7, align 4 - br label %if.end - -if.end: ; preds = %if.then, %for.body - br label %for.inc - -for.inc: ; preds = %if.end - %12 = load i32, i32* %i, align 4 - %inc = add nsw i32 %12, 1 - store i32 %inc, i32* %i, align 4 - br label %for.cond - -for.end: ; preds = %for.cond - ret void -} - ; The source code: ; ;void foo2(float *A, float *B, int *trigger) { @@ -177,16 +102,16 @@ for.end: ; preds = %for.cond ;AVX-LABEL: @foo2 ;AVX: icmp slt <8 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX: call <8 x float> @llvm.masked.load.v8f32.p0v8f32 +;AVX: call <8 x float> @llvm.masked.load.v8f32 ;AVX: fadd <8 x float> -;AVX: call void @llvm.masked.store.v8f32.p0v8f32 +;AVX: call void @llvm.masked.store.v8f32 ;AVX: ret void ;AVX512-LABEL: @foo2 ;AVX512: icmp slt <16 x i32> %wide.load, <i32 100, i32 100, i32 100 -;AVX512: call <16 x float> @llvm.masked.load.v16f32.p0v16f32 +;AVX512: call <16 x float> @llvm.masked.load.v16f32 ;AVX512: fadd <16 x float> -;AVX512: call void @llvm.masked.store.v16f32.p0v16f32 +;AVX512: call void @llvm.masked.store.v16f32 ;AVX512: ret void ; Function Attrs: nounwind uwtable @@ -262,18 +187,18 @@ for.end: ; preds = %for.cond ;AVX-LABEL: @foo3 ;AVX: icmp slt <4 x i32> %wide.load, <i32 100, i32 100, -;AVX: call <4 x double> @llvm.masked.load.v4f64.p0v4f64 +;AVX: call <4 x double> @llvm.masked.load.v4f64 ;AVX: sitofp <4 x i32> %wide.load to <4 x double> ;AVX: fadd <4 x double> -;AVX: call void @llvm.masked.store.v4f64.p0v4f64 +;AVX: call void @llvm.masked.store.v4f64 
;AVX: ret void ;AVX512-LABEL: @foo3 ;AVX512: icmp slt <8 x i32> %wide.load, <i32 100, i32 100, -;AVX512: call <8 x double> @llvm.masked.load.v8f64.p0v8f64 +;AVX512: call <8 x double> @llvm.masked.load.v8f64 ;AVX512: sitofp <8 x i32> %wide.load to <8 x double> ;AVX512: fadd <8 x double> -;AVX512: call void @llvm.masked.store.v8f64.p0v8f64 +;AVX512: call void @llvm.masked.store.v8f64 ;AVX512: ret void @@ -504,17 +429,17 @@ for.end: ; preds = %for.cond ;AVX2-LABEL: @foo6 ;AVX2: icmp sgt <4 x i32> %reverse, zeroinitializer ;AVX2: shufflevector <4 x i1>{{.*}}<4 x i32> <i32 3, i32 2, i32 1, i32 0> -;AVX2: call <4 x double> @llvm.masked.load.v4f64.p0v4f64 +;AVX2: call <4 x double> @llvm.masked.load.v4f64 ;AVX2: fadd <4 x double> -;AVX2: call void @llvm.masked.store.v4f64.p0v4f64 +;AVX2: call void @llvm.masked.store.v4f64 ;AVX2: ret void ;AVX512-LABEL: @foo6 ;AVX512: icmp sgt <8 x i32> %reverse, zeroinitializer ;AVX512: shufflevector <8 x i1>{{.*}}<8 x i32> <i32 7, i32 6, i32 5, i32 4 -;AVX512: call <8 x double> @llvm.masked.load.v8f64.p0v8f64 +;AVX512: call <8 x double> @llvm.masked.load.v8f64 ;AVX512: fadd <8 x double> -;AVX512: call void @llvm.masked.store.v8f64.p0v8f64 +;AVX512: call void @llvm.masked.store.v8f64 ;AVX512: ret void @@ -582,8 +507,8 @@ for.end: ; preds = %for.cond ; } ;AVX512-LABEL: @foo7 -;AVX512: call <8 x double*> @llvm.masked.load.v8p0f64.p0v8p0f64(<8 x double*>* -;AVX512: call void @llvm.masked.store.v8f64.p0v8f64 +;AVX512: call <8 x double*> @llvm.masked.load.v8p0f64(<8 x double*>* +;AVX512: call void @llvm.masked.store.v8f64 ;AVX512: ret void define void @foo7(double* noalias %out, double** noalias %in, i8* noalias %trigger, i32 %size) #0 { @@ -654,8 +579,8 @@ for.end: ; preds = %for.cond ;} ;AVX512-LABEL: @foo8 -;AVX512: call <8 x i32 ()*> @llvm.masked.load.v8p0f_i32f.p0v8p0f_i32f(<8 x i32 ()*>* % -;AVX512: call void @llvm.masked.store.v8f64.p0v8f64 +;AVX512: call <8 x i32 ()*> @llvm.masked.load.v8p0f_i32f(<8 x i32 ()*>* % +;AVX512: call void 
@llvm.masked.store.v8f64 ;AVX512: ret void define void @foo8(double* noalias %out, i32 ()** noalias %in, i8* noalias %trigger, i32 %size) #0 { |

