Diffstat (limited to 'llvm/test/Transforms')

 llvm/test/Transforms/InstCombine/cast.ll         |   2
 llvm/test/Transforms/InstCombine/extractvalue.ll |  22
 llvm/test/Transforms/InstCombine/unpack-fca.ll   | 168
 3 files changed, 108 insertions, 84 deletions
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index f8a94edfb7f..016b6aa6455 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -722,7 +722,7 @@ define i1 @test67(i1 %a, i32 %b) {
 ; CHECK: ret i1 false
 }
 
-%s = type { i32, i32, i32 }
+%s = type { i32, i32, i16 }
 
 define %s @test68(%s *%p, i64 %i) {
 ; CHECK-LABEL: @test68(
diff --git a/llvm/test/Transforms/InstCombine/extractvalue.ll b/llvm/test/Transforms/InstCombine/extractvalue.ll
index 6319590873a..9c293581a06 100644
--- a/llvm/test/Transforms/InstCombine/extractvalue.ll
+++ b/llvm/test/Transforms/InstCombine/extractvalue.ll
@@ -48,16 +48,16 @@ define i32 @foo(i32 %a, i32 %b) {
 ; CHECK: call {{.*}}(i32 [[LOAD]])
 ; CHECK-NOT: extractvalue
 ; CHECK: ret i32 [[LOAD]]
-define i32 @extract2gep({i32, i32}* %pair, i32* %P) {
+define i32 @extract2gep({i16, i32}* %pair, i32* %P) {
   ; The load + extractvalue should be converted
   ; to an inbounds gep + smaller load.
   ; The new load should be in the same spot as the old load.
-  %L = load {i32, i32}, {i32, i32}* %pair
+  %L = load {i16, i32}, {i16, i32}* %pair
   store i32 0, i32* %P
   br label %loop
 
 loop:
-  %E = extractvalue {i32, i32} %L, 1
+  %E = extractvalue {i16, i32} %L, 1
   %C = call i32 @baz(i32 %E)
   store i32 %C, i32* %P
   %cond = icmp eq i32 %C, 0
@@ -67,17 +67,17 @@ end:
   ret i32 %E
 }
 
-; CHECK-LABEL: define i32 @doubleextract2gep(
+; CHECK-LABEL: define i16 @doubleextract2gep(
 ; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i64 0, i32 1, i32 1
-; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
-; CHECK-NEXT: ret i32 [[LOAD]]
-define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i16, i16* [[GEP]]
+; CHECK-NEXT: ret i16 [[LOAD]]
+define i16 @doubleextract2gep({i16, {i32, i16}}* %arg) {
   ; The load + extractvalues should be converted
   ; to a 3-index inbounds gep + smaller load.
-  %L = load {i32, {i32, i32}}, {i32, {i32, i32}}* %arg
-  %E1 = extractvalue {i32, {i32, i32}} %L, 1
-  %E2 = extractvalue {i32, i32} %E1, 1
-  ret i32 %E2
+  %L = load {i16, {i32, i16}}, {i16, {i32, i16}}* %arg
+  %E1 = extractvalue {i16, {i32, i16}} %L, 1
+  %E2 = extractvalue {i32, i16} %E1, 1
+  ret i16 %E2
 }
 
 ; CHECK: define i32 @nogep-multiuse
diff --git a/llvm/test/Transforms/InstCombine/unpack-fca.ll b/llvm/test/Transforms/InstCombine/unpack-fca.ll
index 48bb157956a..9b8d1045749 100644
--- a/llvm/test/Transforms/InstCombine/unpack-fca.ll
+++ b/llvm/test/Transforms/InstCombine/unpack-fca.ll
@@ -5,110 +5,134 @@ target triple = "x86_64-unknown-linux-gnu"
 
 %A__vtbl = type { i8*, i32 (%A*)* }
 %A = type { %A__vtbl* }
+%B = type { i8*, i64 }
 
 @A__vtblZ = constant %A__vtbl { i8* null, i32 (%A*)* @A.foo }
 
 declare i32 @A.foo(%A* nocapture %this)
 
-declare i8* @allocmemory(i64)
-
-define void @storeA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to %A*
+define void @storeA(%A* %a.ptr) {
 ; CHECK-LABEL: storeA
-; CHECK: store %A__vtbl* @A__vtblZ
-  store %A { %A__vtbl* @A__vtblZ }, %A* %1, align 8
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds %A, %A* %a.ptr, i64 0, i32 0
+; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: ret void
+  store %A { %A__vtbl* @A__vtblZ }, %A* %a.ptr, align 8
+  ret void
+}
+
+define void @storeB(%B* %b.ptr) {
+; CHECK-LABEL: storeB
+; CHECK-NEXT: [[GEP1:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 0
+; CHECK-NEXT: store i8* null, i8** [[GEP1]], align 8
+; CHECK-NEXT: [[GEP2:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 1
+; CHECK-NEXT: store i64 42, i64* [[GEP2]], align 8
+; CHECK-NEXT: ret void
+  store %B { i8* null, i64 42 }, %B* %b.ptr, align 8
   ret void
 }
 
-define void @storeStructOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to { %A }*
+define void @storeStructOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: storeStructOfA
-; CHECK: store %A__vtbl* @A__vtblZ
-  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %1, align 8
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: ret void
+  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8
   ret void
 }
 
-define void @storeArrayOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to [1 x %A]*
+define void @storeArrayOfA([1 x %A]* %aa.ptr) {
 ; CHECK-LABEL: storeArrayOfA
-; CHECK: store %A__vtbl* @A__vtblZ
-  store [1 x %A] [%A { %A__vtbl* @A__vtblZ }], [1 x %A]* %1, align 8
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
+; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: ret void
+  store [1 x %A] [%A { %A__vtbl* @A__vtblZ }], [1 x %A]* %aa.ptr, align 8
   ret void
 }
 
-define void @storeStructOfArrayOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to { [1 x %A] }*
+define void @storeStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
 ; CHECK-LABEL: storeStructOfArrayOfA
-; CHECK: store %A__vtbl* @A__vtblZ
-  store { [1 x %A] } { [1 x %A] [%A { %A__vtbl* @A__vtblZ }] }, { [1 x %A] }* %1, align 8
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
+; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: ret void
+  store { [1 x %A] } { [1 x %A] [%A { %A__vtbl* @A__vtblZ }] }, { [1 x %A] }* %saa.ptr, align 8
   ret void
 }
 
-define %A @loadA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to %A*
+define %A @loadA(%A* %a.ptr) {
 ; CHECK-LABEL: loadA
-; CHECK: load %A__vtbl*,
-; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
-  %2 = load %A, %A* %1, align 8
-  ret %A %2
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds %A, %A* %a.ptr, i64 0, i32 0
+; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: [[IV:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
+; CHECK-NEXT: ret %A [[IV]]
+  %1 = load %A, %A* %a.ptr, align 8
+  ret %A %1
 }
 
-define { %A } @loadStructOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to { %A }*
+define %B @loadB(%B* %b.ptr) {
+; CHECK-LABEL: loadB
+; CHECK-NEXT: [[GEP1:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 0
+; CHECK-NEXT: [[LOAD1:%[a-z0-9\.]+]] = load i8*, i8** [[GEP1]], align 8
+; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %B undef, i8* [[LOAD1]], 0
+; CHECK-NEXT: [[GEP2:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 1
+; CHECK-NEXT: [[LOAD2:%[a-z0-9\.]+]] = load i64, i64* [[GEP2]], align 8
+; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue %B [[IV1]], i64 [[LOAD2]], 1
+; CHECK-NEXT: ret %B [[IV2]]
+  %1 = load %B, %B* %b.ptr, align 8
+  ret %B %1
+}
+
+define { %A } @loadStructOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: loadStructOfA
-; CHECK: load %A__vtbl*,
-; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
-; CHECK: insertvalue { %A } undef, %A {{.*}}, 0
-  %2 = load { %A }, { %A }* %1, align 8
-  ret { %A } %2
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
+; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue { %A } undef, %A [[IV1]], 0
+; CHECK-NEXT: ret { %A } [[IV2]]
+  %1 = load { %A }, { %A }* %sa.ptr, align 8
+  ret { %A } %1
 }
 
-define [1 x %A] @loadArrayOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to [1 x %A]*
+define [1 x %A] @loadArrayOfA([1 x %A]* %aa.ptr) {
 ; CHECK-LABEL: loadArrayOfA
-; CHECK: load %A__vtbl*,
-; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
-; CHECK: insertvalue [1 x %A] undef, %A {{.*}}, 0
-  %2 = load [1 x %A], [1 x %A]* %1, align 8
-  ret [1 x %A] %2
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds [1 x %A], [1 x %A]* %aa.ptr, i64 0, i64 0, i32 0
+; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
+; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue [1 x %A] undef, %A [[IV1]], 0
+; CHECK-NEXT: ret [1 x %A] [[IV2]]
+  %1 = load [1 x %A], [1 x %A]* %aa.ptr, align 8
+  ret [1 x %A] %1
 }
 
-define { [1 x %A] } @loadStructOfArrayOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to { [1 x %A] }*
+define { [1 x %A] } @loadStructOfArrayOfA({ [1 x %A] }* %saa.ptr) {
 ; CHECK-LABEL: loadStructOfArrayOfA
-; CHECK: load %A__vtbl*,
-; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
-; CHECK: insertvalue [1 x %A] undef, %A {{.*}}, 0
-; CHECK: insertvalue { [1 x %A] } undef, [1 x %A] {{.*}}, 0
-  %2 = load { [1 x %A] }, { [1 x %A] }* %1, align 8
-  ret { [1 x %A] } %2
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { [1 x %A] }, { [1 x %A] }* %saa.ptr, i64 0, i32 0, i64 0, i32 0
+; CHECK-NEXT: [[LOAD:%[a-z0-9\.]+]] = load %A__vtbl*, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: [[IV1:%[a-z0-9\.]+]] = insertvalue %A undef, %A__vtbl* [[LOAD]], 0
+; CHECK-NEXT: [[IV2:%[a-z0-9\.]+]] = insertvalue [1 x %A] undef, %A [[IV1]], 0
+; CHECK-NEXT: [[IV3:%[a-z0-9\.]+]] = insertvalue { [1 x %A] } undef, [1 x %A] [[IV2]], 0
+; CHECK-NEXT: ret { [1 x %A] } [[IV3]]
+  %1 = load { [1 x %A] }, { [1 x %A] }* %saa.ptr, align 8
+  ret { [1 x %A] } %1
 }
 
-define { %A } @structOfA() {
-body:
-  %0 = tail call i8* @allocmemory(i64 32)
-  %1 = bitcast i8* %0 to { %A }*
+define { %A } @structOfA({ %A }* %sa.ptr) {
 ; CHECK-LABEL: structOfA
-; CHECK: store %A__vtbl* @A__vtblZ
-  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %1, align 8
-  %2 = load { %A }, { %A }* %1, align 8
-; CHECK-NOT: load
-; CHECK: ret { %A } { %A { %A__vtbl* @A__vtblZ } }
-  ret { %A } %2
+; CHECK-NEXT: [[GEP:%[a-z0-9\.]+]] = getelementptr inbounds { %A }, { %A }* %sa.ptr, i64 0, i32 0, i32 0
+; CHECK-NEXT: store %A__vtbl* @A__vtblZ, %A__vtbl** [[GEP]], align 8
+; CHECK-NEXT: ret { %A } { %A { %A__vtbl* @A__vtblZ } }
+  store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %sa.ptr, align 8
+  %1 = load { %A }, { %A }* %sa.ptr, align 8
+  ret { %A } %1
+}
+
+define %B @structB(%B* %b.ptr) {
+; CHECK-LABEL: structB
+; CHECK-NEXT: [[GEP1:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 0
+; CHECK-NEXT: store i8* null, i8** [[GEP1]], align 8
+; CHECK-NEXT: [[GEP2:%[a-z0-9\.]+]] = getelementptr inbounds %B, %B* %b.ptr, i64 0, i32 1
+; CHECK-NEXT: store i64 42, i64* [[GEP2]], align 8
+; CHECK-NEXT: ret %B { i8* null, i64 42 }
+  store %B { i8* null, i64 42 }, %B* %b.ptr, align 8
+  %1 = load %B, %B* %b.ptr, align 8
+  ret %B %1
 }
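
For context on the unpack-fca.ll changes above: the behavior under test is InstCombine's unpacking of first-class aggregate (FCA) loads and stores into per-field scalar operations, and the new %B functions extend the single-field %A coverage to a two-field struct. The sketch below is not part of the patch; it is a minimal illustration of the expected rewrite in the same typed-pointer IR style as the tests, with %pair, @store_pair, @load_second, %f0 and %f1 as hypothetical names. It would be exercised through the usual `opt -instcombine -S | FileCheck` pattern used by InstCombine tests (the RUN line itself is not shown in this diff).

%pair = type { i8*, i64 }

define void @store_pair(%pair* %p) {
  ; Before InstCombine: a single aggregate store of the whole struct value.
  store %pair { i8* null, i64 42 }, %pair* %p, align 8
  ret void
}

; Expected shape afterwards: one inbounds GEP plus a scalar store per field, roughly
;   %f0 = getelementptr inbounds %pair, %pair* %p, i64 0, i32 0
;   store i8* null, i8** %f0, align 8
;   %f1 = getelementptr inbounds %pair, %pair* %p, i64 0, i32 1
;   store i64 42, i64* %f1, align 8

define i64 @load_second(%pair* %p) {
  ; An aggregate load that only feeds extractvalue is likewise expected to
  ; shrink to an inbounds GEP plus a smaller scalar load of the selected field,
  ; the pattern extract2gep and doubleextract2gep check above.
  %l = load %pair, %pair* %p, align 8
  %e = extractvalue %pair %l, 1
  ret i64 %e
}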

