Diffstat (limited to 'llvm/test/CodeGen')
78 files changed, 576 insertions, 706 deletions
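Two changes recur throughout the diff below: RUN lines gain -hexagon-initial-cfg-cleanup=0, presumably so the initial CFG cleanup does not restructure the basic blocks these tests depend on, and crash-reproducer IR that loaded from or stored to undef/null pointers and ended in unreachable is rewritten to take real arguments or use globals and to return normally. The following sketch is a hypothetical test, not one of the 78 files changed here, showing that pattern in the same diff notation:

-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s
 ; Check for successful compilation.
 ; CHECK: vmem

-define void @fred() #0 {
+define void @fred(i16* %a0, <16 x i16>* %a1) #0 {
 b0:
-  %v1 = load i16, i16* undef, align 2
+  ; load through a real argument instead of an undef pointer
+  %v1 = load i16, i16* %a0, align 2
   %v2 = insertelement <16 x i16> undef, i16 %v1, i32 15
-  store <16 x i16> %v2, <16 x i16>* undef, align 2
-  unreachable
+  ; store to a real destination and return instead of ending in unreachable
+  store <16 x i16> %v2, <16 x i16>* %a1, align 2
+  ret void
 }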
diff --git a/llvm/test/CodeGen/Hexagon/BranchPredict.ll b/llvm/test/CodeGen/Hexagon/BranchPredict.ll index 40791c98148..60d12df1807 100644 --- a/llvm/test/CodeGen/Hexagon/BranchPredict.ll +++ b/llvm/test/CodeGen/Hexagon/BranchPredict.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -ifcvt-limit=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -ifcvt-limit=0 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check if the branch probabilities are reflected in the instructions: ; The basic block placement pass should place the more probable successor diff --git a/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll b/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll index 9f94d426097..32c56053ce2 100644 --- a/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll +++ b/llvm/test/CodeGen/Hexagon/SUnit-boundary-prob.ll @@ -1,193 +1,176 @@ -; REQUIRES: asserts -; RUN: llc -march=hexagon -O2 -mcpu=hexagonv60 --stats -o - 2>&1 < %s | FileCheck %s +; RUN: llc -march=hexagon -O2 -mcpu=hexagonv60 -hexagon-initial-cfg-cleanup=0 --stats -o - 2>&1 < %s | FileCheck %s ; This was aborting while processing SUnits. +; REQUIRES: asserts ; CHECK: vmem ; CHECK-NOT: Number of node order issues found ; CHECK: Number of loops software pipelined ; CHECK-NOT: Number of node order issues found -source_filename = "bugpoint-output-bdb0052.bc" + target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon-unknown--elf" -; Function Attrs: nounwind readnone declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0 - -; Function Attrs: nounwind readnone declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0 - -; Function Attrs: nounwind readnone declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #0 - -; Function Attrs: nounwind readnone declare <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32>, <16 x i32>, i32) #0 - -; Function Attrs: nounwind readnone declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0 - -; Function Attrs: nounwind readnone declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #0 - -; Function Attrs: nounwind readnone declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #0 - -; Function Attrs: nounwind readnone declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #0 - -; Function Attrs: nounwind readnone declare <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32>, <16 x i32>, i32) #0 -define void @__error_op_vmpy_v__uh_v__uh__1() #1 { -entry: - %in_u16.host181 = load i16*, i16** undef, align 4 - %in_u32.host182 = load i32*, i32** undef, align 4 - br label %"for op_vmpy_v__uh_v__uh__1.s0.y" - -"for op_vmpy_v__uh_v__uh__1.s0.y": ; preds = %"end for op_vmpy_v__uh_v__uh__1.s0.x.x", %entry - %op_vmpy_v__uh_v__uh__1.s0.y = phi i32 [ 0, %entry ], [ %63, %"end for op_vmpy_v__uh_v__uh__1.s0.x.x" ] - %0 = mul nuw nsw i32 %op_vmpy_v__uh_v__uh__1.s0.y, 768 - %1 = add nuw nsw i32 %0, 32 - %2 = add nuw nsw i32 %0, 64 - %3 = add nuw nsw i32 %0, 96 - br label %"for op_vmpy_v__uh_v__uh__1.s0.x.x" - -"for op_vmpy_v__uh_v__uh__1.s0.x.x": ; preds = %"for op_vmpy_v__uh_v__uh__1.s0.x.x", %"for op_vmpy_v__uh_v__uh__1.s0.y" - %.phi210 = phi i32* [ %in_u32.host182, %"for op_vmpy_v__uh_v__uh__1.s0.y" ], [ %.inc211.3, %"for op_vmpy_v__uh_v__uh__1.s0.x.x" ] - %.phi213 = phi i16* [ %in_u16.host181, %"for op_vmpy_v__uh_v__uh__1.s0.y" ], [ %.inc214.3, %"for op_vmpy_v__uh_v__uh__1.s0.x.x" ] - %op_vmpy_v__uh_v__uh__1.s0.x.x = phi i32 [ 0, 
%"for op_vmpy_v__uh_v__uh__1.s0.y" ], [ %61, %"for op_vmpy_v__uh_v__uh__1.s0.x.x" ] - %4 = mul nuw nsw i32 %op_vmpy_v__uh_v__uh__1.s0.x.x, 32 - %5 = bitcast i32* %.phi210 to <16 x i32>* - %6 = load <16 x i32>, <16 x i32>* %5, align 64, !tbaa !1 - %7 = add nuw nsw i32 %4, 16 - %8 = getelementptr inbounds i32, i32* %in_u32.host182, i32 %7 - %9 = bitcast i32* %8 to <16 x i32>* - %10 = load <16 x i32>, <16 x i32>* %9, align 64, !tbaa !1 - %11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %10, <16 x i32> %6) - %e.i = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %11) #2 - %o.i = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %11) #2 - %r.i = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %o.i, <16 x i32> %e.i, i32 -4) #2 - %12 = bitcast i16* %.phi213 to <16 x i32>* - %13 = load <16 x i32>, <16 x i32>* %12, align 64, !tbaa !4 - %a_lo.i = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i) #2 - %a_hi.i = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i) #2 - %a_e.i = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %a_hi.i, <16 x i32> %a_lo.i) #2 - %a_o.i = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %a_hi.i, <16 x i32> %a_lo.i) #2 - %ab_e.i = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_e.i, <16 x i32> %13) #2 - %ab_o.i = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_o.i, <16 x i32> %13) #2 - %a_lo.i.i = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %ab_e.i) #2 - %l_lo.i.i = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %ab_o.i) #2 - %s_lo.i.i = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %a_lo.i.i, <16 x i32> %l_lo.i.i, i32 16) #2 - %l_hi.i.i = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %ab_o.i) #2 - %s_hi.i.i = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %l_hi.i.i, i32 16) #2 - %s.i.i = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %s_hi.i.i, <16 x i32> %s_lo.i.i) #2 - %e.i189 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %s.i.i) #2 - %o.i190 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %s.i.i) #2 - %r.i191 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %o.i190, <16 x i32> %e.i189, i32 -4) #2 - %14 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i191) - %15 = add nuw nsw i32 %4, %0 - %16 = getelementptr inbounds i32, i32* undef, i32 %15 - %17 = bitcast i32* %16 to <16 x i32>* - store <16 x i32> %14, <16 x i32>* %17, align 64, !tbaa !6 - %18 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i191) - store <16 x i32> %18, <16 x i32>* undef, align 64, !tbaa !6 - %.inc211 = getelementptr i32, i32* %.phi210, i32 32 - %.inc214 = getelementptr i16, i16* %.phi213, i32 32 - %19 = bitcast i32* %.inc211 to <16 x i32>* - %20 = load <16 x i32>, <16 x i32>* %19, align 64, !tbaa !1 - %21 = add nuw nsw i32 %4, 48 - %22 = getelementptr inbounds i32, i32* %in_u32.host182, i32 %21 - %23 = bitcast i32* %22 to <16 x i32>* - %24 = load <16 x i32>, <16 x i32>* %23, align 64, !tbaa !1 - %25 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %24, <16 x i32> %20) - %e.i.1 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %25) #2 - %r.i.1 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> undef, <16 x i32> %e.i.1, i32 -4) #2 - %26 = bitcast i16* %.inc214 to <16 x i32>* - %27 = load <16 x i32>, <16 x i32>* %26, align 64, !tbaa !4 - %a_lo.i.1 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i.1) #2 - %a_e.i.1 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> 
undef, <16 x i32> %a_lo.i.1) #2 - %a_o.i.1 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> %a_lo.i.1) #2 - %ab_e.i.1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_e.i.1, <16 x i32> %27) #2 - %ab_o.i.1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_o.i.1, <16 x i32> %27) #2 - %a_lo.i.i.1 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %ab_e.i.1) #2 - %s_lo.i.i.1 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %a_lo.i.i.1, <16 x i32> undef, i32 16) #2 - %a_hi.i.i.1 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %ab_e.i.1) #2 - %l_hi.i.i.1 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %ab_o.i.1) #2 - %s_hi.i.i.1 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %a_hi.i.i.1, <16 x i32> %l_hi.i.i.1, i32 16) #2 - %s.i.i.1 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %s_hi.i.i.1, <16 x i32> %s_lo.i.i.1) #2 - %e.i189.1 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %s.i.i.1) #2 - %o.i190.1 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %s.i.i.1) #2 - %r.i191.1 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %o.i190.1, <16 x i32> %e.i189.1, i32 -4) #2 - %28 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i191.1) - %29 = add nuw nsw i32 %1, %4 - %30 = getelementptr inbounds i32, i32* undef, i32 %29 - %31 = bitcast i32* %30 to <16 x i32>* - store <16 x i32> %28, <16 x i32>* %31, align 64, !tbaa !6 - %32 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i191.1) - %33 = add nuw nsw i32 %29, 16 - %34 = getelementptr inbounds i32, i32* undef, i32 %33 - %35 = bitcast i32* %34 to <16 x i32>* - store <16 x i32> %32, <16 x i32>* %35, align 64, !tbaa !6 - %.inc211.1 = getelementptr i32, i32* %.phi210, i32 64 - %.inc214.1 = getelementptr i16, i16* %.phi213, i32 64 - %36 = bitcast i32* %.inc211.1 to <16 x i32>* - %37 = load <16 x i32>, <16 x i32>* %36, align 64, !tbaa !1 - %38 = add nuw nsw i32 %4, 80 - %39 = getelementptr inbounds i32, i32* %in_u32.host182, i32 %38 - %40 = bitcast i32* %39 to <16 x i32>* - %41 = load <16 x i32>, <16 x i32>* %40, align 64, !tbaa !1 - %42 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %41, <16 x i32> %37) - %e.i.2 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %42) #2 - %o.i.2 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %42) #2 - %r.i.2 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %o.i.2, <16 x i32> %e.i.2, i32 -4) #2 - %43 = bitcast i16* %.inc214.1 to <16 x i32>* - %44 = load <16 x i32>, <16 x i32>* %43, align 64, !tbaa !4 - %a_lo.i.2 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i.2) #2 - %a_hi.i.2 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i.2) #2 - %a_e.i.2 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %a_hi.i.2, <16 x i32> %a_lo.i.2) #2 - %a_o.i.2 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %a_hi.i.2, <16 x i32> %a_lo.i.2) #2 - %ab_e.i.2 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_e.i.2, <16 x i32> %44) #2 - %ab_o.i.2 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a_o.i.2, <16 x i32> %44) #2 - %l_lo.i.i.2 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %ab_o.i.2) #2 - %s_lo.i.i.2 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %l_lo.i.i.2, i32 16) #2 - %a_hi.i.i.2 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %ab_e.i.2) #2 - %l_hi.i.i.2 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %ab_o.i.2) #2 - %s_hi.i.i.2 = tail call <16 x i32> 
@llvm.hexagon.V6.vaslw.acc(<16 x i32> %a_hi.i.i.2, <16 x i32> %l_hi.i.i.2, i32 16) #2 - %s.i.i.2 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %s_hi.i.i.2, <16 x i32> %s_lo.i.i.2) #2 - %e.i189.2 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %s.i.i.2) #2 - %o.i190.2 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %s.i.i.2) #2 - %r.i191.2 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %o.i190.2, <16 x i32> %e.i189.2, i32 -4) #2 - %45 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i191.2) - %46 = add nuw nsw i32 %2, %4 - %47 = getelementptr inbounds i32, i32* undef, i32 %46 - %48 = bitcast i32* %47 to <16 x i32>* - store <16 x i32> %45, <16 x i32>* %48, align 64, !tbaa !6 - %49 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i191.2) - %50 = add nuw nsw i32 %46, 16 - %51 = getelementptr inbounds i32, i32* undef, i32 %50 - %52 = bitcast i32* %51 to <16 x i32>* - store <16 x i32> %49, <16 x i32>* %52, align 64, !tbaa !6 - %e.i189.3 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> undef) #2 - %r.i191.3 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %e.i189.3, i32 -4) #2 - %53 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %r.i191.3) - %54 = add nuw nsw i32 %3, %4 - %55 = getelementptr inbounds i32, i32* undef, i32 %54 - %56 = bitcast i32* %55 to <16 x i32>* - store <16 x i32> %53, <16 x i32>* %56, align 64, !tbaa !6 - %57 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %r.i191.3) - %58 = add nuw nsw i32 %54, 16 - %59 = getelementptr inbounds i32, i32* undef, i32 %58 - %60 = bitcast i32* %59 to <16 x i32>* - store <16 x i32> %57, <16 x i32>* %60, align 64, !tbaa !6 - %61 = add nuw nsw i32 %op_vmpy_v__uh_v__uh__1.s0.x.x, 4 - %62 = icmp eq i32 %61, 24 - %.inc211.3 = getelementptr i32, i32* %.phi210, i32 128 - %.inc214.3 = getelementptr i16, i16* %.phi213, i32 128 - br i1 %62, label %"end for op_vmpy_v__uh_v__uh__1.s0.x.x", label %"for op_vmpy_v__uh_v__uh__1.s0.x.x" - -"end for op_vmpy_v__uh_v__uh__1.s0.x.x": ; preds = %"for op_vmpy_v__uh_v__uh__1.s0.x.x" - %63 = add nuw nsw i32 %op_vmpy_v__uh_v__uh__1.s0.y, 1 - br label %"for op_vmpy_v__uh_v__uh__1.s0.y" +define void @f0() #1 { +b0: + %v0 = load i16*, i16** undef, align 4 + %v1 = load i32*, i32** undef, align 4 + br label %b1 + +b1: ; preds = %b3, %b0 + %v2 = phi i32 [ 0, %b0 ], [ %v129, %b3 ] + %v3 = mul nuw nsw i32 %v2, 768 + %v4 = add nuw nsw i32 %v3, 32 + %v5 = add nuw nsw i32 %v3, 64 + %v6 = add nuw nsw i32 %v3, 96 + br label %b2 + +b2: ; preds = %b2, %b1 + %v7 = phi i32* [ %v1, %b1 ], [ %v127, %b2 ] + %v8 = phi i16* [ %v0, %b1 ], [ %v128, %b2 ] + %v9 = phi i32 [ 0, %b1 ], [ %v125, %b2 ] + %v10 = mul nuw nsw i32 %v9, 32 + %v11 = bitcast i32* %v7 to <16 x i32>* + %v12 = load <16 x i32>, <16 x i32>* %v11, align 64, !tbaa !1 + %v13 = add nuw nsw i32 %v10, 16 + %v14 = getelementptr inbounds i32, i32* %v1, i32 %v13 + %v15 = bitcast i32* %v14 to <16 x i32>* + %v16 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !1 + %v17 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v16, <16 x i32> %v12) + %v18 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v17) #2 + %v19 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v17) #2 + %v20 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v19, <16 x i32> %v18, i32 -4) #2 + %v21 = bitcast i16* %v8 to <16 x i32>* + %v22 = load <16 x i32>, <16 x i32>* %v21, align 64, !tbaa !4 + %v23 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v20) #2 + %v24 = tail call <16 x 
i32> @llvm.hexagon.V6.hi(<32 x i32> %v20) #2 + %v25 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v24, <16 x i32> %v23) #2 + %v26 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v24, <16 x i32> %v23) #2 + %v27 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v25, <16 x i32> %v22) #2 + %v28 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v26, <16 x i32> %v22) #2 + %v29 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v27) #2 + %v30 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v28) #2 + %v31 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v29, <16 x i32> %v30, i32 16) #2 + %v32 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v28) #2 + %v33 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %v32, i32 16) #2 + %v34 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v33, <16 x i32> %v31) #2 + %v35 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v34) #2 + %v36 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v34) #2 + %v37 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v36, <16 x i32> %v35, i32 -4) #2 + %v38 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v37) + %v39 = add nuw nsw i32 %v10, %v3 + %v40 = getelementptr inbounds i32, i32* undef, i32 %v39 + %v41 = bitcast i32* %v40 to <16 x i32>* + store <16 x i32> %v38, <16 x i32>* %v41, align 64, !tbaa !6 + %v42 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v37) + store <16 x i32> %v42, <16 x i32>* undef, align 64, !tbaa !6 + %v43 = getelementptr i32, i32* %v7, i32 32 + %v44 = getelementptr i16, i16* %v8, i32 32 + %v45 = bitcast i32* %v43 to <16 x i32>* + %v46 = load <16 x i32>, <16 x i32>* %v45, align 64, !tbaa !1 + %v47 = add nuw nsw i32 %v10, 48 + %v48 = getelementptr inbounds i32, i32* %v1, i32 %v47 + %v49 = bitcast i32* %v48 to <16 x i32>* + %v50 = load <16 x i32>, <16 x i32>* %v49, align 64, !tbaa !1 + %v51 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v50, <16 x i32> %v46) + %v52 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v51) #2 + %v53 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> undef, <16 x i32> %v52, i32 -4) #2 + %v54 = bitcast i16* %v44 to <16 x i32>* + %v55 = load <16 x i32>, <16 x i32>* %v54, align 64, !tbaa !4 + %v56 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v53) #2 + %v57 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> undef, <16 x i32> %v56) #2 + %v58 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> %v56) #2 + %v59 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v57, <16 x i32> %v55) #2 + %v60 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v58, <16 x i32> %v55) #2 + %v61 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v59) #2 + %v62 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v61, <16 x i32> undef, i32 16) #2 + %v63 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v59) #2 + %v64 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v60) #2 + %v65 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v63, <16 x i32> %v64, i32 16) #2 + %v66 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v65, <16 x i32> %v62) #2 + %v67 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v66) #2 + %v68 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v66) #2 + %v69 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v68, <16 x i32> %v67, i32 -4) #2 + %v70 = tail call <16 x i32> 
@llvm.hexagon.V6.lo(<32 x i32> %v69) + %v71 = add nuw nsw i32 %v4, %v10 + %v72 = getelementptr inbounds i32, i32* undef, i32 %v71 + %v73 = bitcast i32* %v72 to <16 x i32>* + store <16 x i32> %v70, <16 x i32>* %v73, align 64, !tbaa !6 + %v74 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v69) + %v75 = add nuw nsw i32 %v71, 16 + %v76 = getelementptr inbounds i32, i32* undef, i32 %v75 + %v77 = bitcast i32* %v76 to <16 x i32>* + store <16 x i32> %v74, <16 x i32>* %v77, align 64, !tbaa !6 + %v78 = getelementptr i32, i32* %v7, i32 64 + %v79 = getelementptr i16, i16* %v8, i32 64 + %v80 = bitcast i32* %v78 to <16 x i32>* + %v81 = load <16 x i32>, <16 x i32>* %v80, align 64, !tbaa !1 + %v82 = add nuw nsw i32 %v10, 80 + %v83 = getelementptr inbounds i32, i32* %v1, i32 %v82 + %v84 = bitcast i32* %v83 to <16 x i32>* + %v85 = load <16 x i32>, <16 x i32>* %v84, align 64, !tbaa !1 + %v86 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v85, <16 x i32> %v81) + %v87 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v86) #2 + %v88 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v86) #2 + %v89 = tail call <32 x i32> @llvm.hexagon.V6.vdealvdd(<16 x i32> %v88, <16 x i32> %v87, i32 -4) #2 + %v90 = bitcast i16* %v79 to <16 x i32>* + %v91 = load <16 x i32>, <16 x i32>* %v90, align 64, !tbaa !4 + %v92 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v89) #2 + %v93 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v89) #2 + %v94 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v93, <16 x i32> %v92) #2 + %v95 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v93, <16 x i32> %v92) #2 + %v96 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v94, <16 x i32> %v91) #2 + %v97 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %v95, <16 x i32> %v91) #2 + %v98 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v97) #2 + %v99 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> undef, <16 x i32> %v98, i32 16) #2 + %v100 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v96) #2 + %v101 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v97) #2 + %v102 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %v100, <16 x i32> %v101, i32 16) #2 + %v103 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v102, <16 x i32> %v99) #2 + %v104 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v103) #2 + %v105 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v103) #2 + %v106 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> %v105, <16 x i32> %v104, i32 -4) #2 + %v107 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v106) + %v108 = add nuw nsw i32 %v5, %v10 + %v109 = getelementptr inbounds i32, i32* undef, i32 %v108 + %v110 = bitcast i32* %v109 to <16 x i32>* + store <16 x i32> %v107, <16 x i32>* %v110, align 64, !tbaa !6 + %v111 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v106) + %v112 = add nuw nsw i32 %v108, 16 + %v113 = getelementptr inbounds i32, i32* undef, i32 %v112 + %v114 = bitcast i32* %v113 to <16 x i32>* + store <16 x i32> %v111, <16 x i32>* %v114, align 64, !tbaa !6 + %v115 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> undef) #2 + %v116 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v115, i32 -4) #2 + %v117 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v116) + %v118 = add nuw nsw i32 %v6, %v10 + %v119 = getelementptr inbounds i32, i32* undef, i32 %v118 + %v120 = bitcast i32* %v119 to <16 x i32>* + store <16 x i32> %v117, 
<16 x i32>* %v120, align 64, !tbaa !6 + %v121 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v116) + %v122 = add nuw nsw i32 %v118, 16 + %v123 = getelementptr inbounds i32, i32* undef, i32 %v122 + %v124 = bitcast i32* %v123 to <16 x i32>* + store <16 x i32> %v121, <16 x i32>* %v124, align 64, !tbaa !6 + %v125 = add nuw nsw i32 %v9, 4 + %v126 = icmp eq i32 %v125, 24 + %v127 = getelementptr i32, i32* %v7, i32 128 + %v128 = getelementptr i16, i16* %v8, i32 128 + br i1 %v126, label %b3, label %b2 + +b3: ; preds = %b2 + %v129 = add nuw nsw i32 %v2, 1 + br label %b1 } attributes #0 = { nounwind readnone } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll index f96dbf2af49..86687b3c047 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/build-vector-i32-type.ll @@ -6,14 +6,17 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" +@g0 = global <16 x float> zeroinitializer, align 8 +@g1 = global <16 x i32> zeroinitializer, align 8 + define void @fred() #0 { b0: - %v1 = load <16 x float>, <16 x float>* null, align 8 + %v1 = load <16 x float>, <16 x float>* @g0, align 8 %v2 = fcmp olt <16 x float> undef, %v1 %v3 = select <16 x i1> %v2, <16 x i16> undef, <16 x i16> zeroinitializer %v4 = sext <16 x i16> %v3 to <16 x i32> - store <16 x i32> %v4, <16 x i32>* undef, align 64 - unreachable + store <16 x i32> %v4, <16 x i32>* @g1, align 64 + ret void } attributes #0 = { noinline norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll index bc859aefad7..91384d3d9a7 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-inreg.ll @@ -7,14 +7,14 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define hidden fastcc void @fred() #0 { +define fastcc void @fred(i16* %a0, <16 x i32>* %a1) #0 { b0: - %v1 = load i16, i16* undef, align 2 + %v1 = load i16, i16* %a0, align 2 %v2 = insertelement <16 x i16> undef, i16 %v1, i32 15 %v3 = zext <16 x i16> %v2 to <16 x i32> %v4 = shl nuw <16 x i32> %v3, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> - store <16 x i32> %v4, <16 x i32>* undef, align 4 - unreachable + store <16 x i32> %v4, <16 x i32>* %a1, align 4 + ret void } attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll index c04270a3f05..9a290192f63 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-anyext-pair.ll @@ -6,11 +6,11 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +define void @fred(<64 x i8>* %a0, <64 x i8>* %a1) #0 { b0: - %v1 = load <64 x i8>, <64 x i8>* undef, align 
1 + %v1 = load <64 x i8>, <64 x i8>* %a0, align 1 %v2 = sext <64 x i8> %v1 to <64 x i32> - %v3 = load <64 x i8>, <64 x i8>* null, align 1 + %v3 = load <64 x i8>, <64 x i8>* %a1, align 1 %v4 = sext <64 x i8> %v3 to <64 x i32> %v5 = mul nsw <64 x i32> %v4, %v2 %v6 = add nsw <64 x i32> %v5, zeroinitializer @@ -18,8 +18,8 @@ b0: %v8 = ashr exact <64 x i32> %v7, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24> %v9 = mul nsw <64 x i32> %v8, %v8 %v10 = trunc <64 x i32> %v9 to <64 x i8> - store <64 x i8> %v10, <64 x i8>* undef, align 1 - unreachable + store <64 x i8> %v10, <64 x i8>* %a0, align 1 + ret void } attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll index 91d95c0e7d5..4ba248a0962 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-build-undef.ll @@ -10,9 +10,11 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +@g0 = global <32 x i8> zeroinitializer + +define void @fred(i8* %a0) #0 { b0: - %v1 = load i8, i8* undef, align 1 + %v1 = load i8, i8* %a0, align 1 %v2 = insertelement <32 x i8> undef, i8 %v1, i32 31 %v3 = zext <32 x i8> %v2 to <32 x i16> %v4 = add nuw nsw <32 x i16> %v3, zeroinitializer @@ -32,8 +34,8 @@ b0: %v18 = add <32 x i16> %v17, zeroinitializer %v19 = lshr <32 x i16> %v18, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4> %v20 = trunc <32 x i16> %v19 to <32 x i8> - store <32 x i8> %v20, <32 x i8>* undef, align 1 - unreachable + store <32 x i8> %v20, <32 x i8>* @g0, align 1 + ret void } attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll index 3cc73750164..67d8e6663cb 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-multiple.ll @@ -7,13 +7,13 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +define void @fred(i32* %a0, i32* %a1, i8* %a2) #0 { b0: - %v1 = load i32, i32* undef, align 4 + %v1 = load i32, i32* %a0, align 4 %v2 = mul nsw i32 %v1, -15137 %v3 = add nsw i32 0, %v2 %v4 = sub nsw i32 0, %v3 - %v5 = load i32, i32* undef, align 4 + %v5 = load i32, i32* %a1, align 4 %v6 = insertelement <2 x i32> undef, i32 %v5, i32 1 %v7 = add nsw <2 x i32> undef, %v6 %v8 = extractelement <2 x i32> %v7, i32 0 @@ -28,8 +28,8 @@ b0: 
%v17 = extractelement <8 x i32> %v16, i32 5 %v18 = getelementptr inbounds i8, i8* null, i32 %v17 %v19 = load i8, i8* %v18, align 1 - store i8 %v19, i8* undef, align 1 - unreachable + store i8 %v19, i8* %a2, align 1 + ret void } attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll index 81e419981a3..1721c996fdb 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors-bool.ll @@ -6,6 +6,8 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" +@g0 = global <8 x i32> zeroinitializer, align 8 + define void @fred(<8 x float>* %a0, <8 x float>* %a1) #0 { b0: %v0 = load <8 x float>, <8 x float>* %a1, align 8 @@ -14,8 +16,8 @@ b0: %v3 = fcmp olt <8 x float> %v2, zeroinitializer %v4 = and <8 x i1> %v1, %v3 %v5 = zext <8 x i1> %v4 to <8 x i32> - store <8 x i32> %v5, <8 x i32>* undef, align 8 - unreachable + store <8 x i32> %v5, <8 x i32>* @g0, align 8 + ret void } attributes #0 = { noinline norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll index 5e37ef088cf..3ec4c7bc5ce 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-concat-vectors.ll @@ -10,7 +10,7 @@ declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0 declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #0 declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0 -define void @crash() #1 { +define void @crash(<16 x i32>* %a0) #1 { b0: %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 0) #0 %v2 = bitcast <16 x i32> %v1 to <32 x i16> @@ -20,8 +20,8 @@ b0: %v6 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v5) #0 %v7 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v6, i32 -2) #0 %v8 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v7) - store <16 x i32> %v8, <16 x i32>* undef, align 2 - unreachable + store <16 x i32> %v8, <16 x i32>* %a0, align 2 + ret void } attributes #0 = { nounwind readnone } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll index 5575f630fe1..f446b63bf53 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-const-splat-bitcast.ll @@ -11,9 +11,11 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +@g0 = global <8 x i16> zeroinitializer, align 2 + +define i32 @fred() #0 { b0: - %v1 = load <8 x i16>, <8 x i16>* undef, align 2 + %v1 = load <8 x i16>, <8 x i16>* @g0, align 2 %v2 = icmp sgt <8 x i16> %v1, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11> %v3 = zext <8 x i1> %v2 to <8 x i32> %v4 = add nuw nsw <8 x i32> zeroinitializer, %v3 @@ -28,10 +30,10 @@ b0: br i1 %v12, label %b13, label %b14 b13: ; preds = %b0 - unreachable + ret i32 %v11 b14: ; preds = %b0 - ret void + ret i32 0 } 
attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll index 948bdc075b9..b2b71bf0628 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-expand-unaligned-loads-noindexed.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check for successful compilation. ; CHECK: vmem diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-qfalse.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-qfalse.ll index b0cf12cd427..9324f524d7a 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-qfalse.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-qfalse.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Make sure we can select QFALSE. ; CHECK: vcmp.gt(v0.w,v0.w) diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll index c251292c9da..7b5b58bb5d0 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-select-const.ll @@ -6,7 +6,7 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon-unknown--elf" -define void @fred() #0 { +define void @fred(<16 x i32>* %a0, <16 x i32>* %a1) #0 { b0: %v1 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> <i32 151388928, i32 353505036, i32 555621144, i32 757737252, i32 959853360, i32 1161969468, i32 1364085576, i32 1566201684, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32> undef, i32 3) %v2 = bitcast <16 x i32> %v1 to <64 x i8> @@ -14,15 +14,15 @@ b0: %v4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %v3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> %v5 = bitcast <64 x i8> %v4 to <16 x i32> %v6 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v5) - store <16 x i32> %v6, <16 x i32>* undef, align 1 + store <16 x i32> %v6, <16 x i32>* %a0, align 1 %v7 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> <i32 151388928, i32 353505036, i32 555621144, i32 757737252, i32 959853360, i32 1161969468, i32 1364085576, i32 1566201684, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32> zeroinitializer, i32 3) %v8 = bitcast <16 x i32> %v7 to <64 x i8> %v9 = shufflevector <64 x i8> %v8, <64 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> %v10 = shufflevector <32 x i8> %v9, <32 
x i8> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> %v11 = bitcast <64 x i8> %v10 to <16 x i32> %v12 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v11) - store <16 x i32> %v12, <16 x i32>* undef, align 1 - unreachable + store <16 x i32> %v12, <16 x i32>* %a1, align 1 + ret void } declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #1 diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-setcc-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-setcc-pair.ll index d2db0cac235..587383b7e18 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-setcc-pair.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-setcc-pair.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check that a setcc of a vector pair is handled (without crashing). ; CHECK: vcmp diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll index aee2a59af1a..a8e71abfdc8 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-sext-inreg.ll @@ -8,9 +8,9 @@ target triple = "hexagon" ; CHECK-LABEL: danny: ; CHECK: vmem -define void @danny() #0 { +define void @danny(i16* %a0) #0 { b0: - %v1 = load i16, i16* undef, align 2 + %v1 = load i16, i16* %a0, align 2 %v2 = insertelement <8 x i16> undef, i16 %v1, i32 6 %v3 = insertelement <8 x i16> %v2, i16 undef, i32 7 %v4 = sext <8 x i16> %v3 to <8 x i32> @@ -24,15 +24,15 @@ b0: %v12 = sub nsw <8 x i32> zeroinitializer, %v11 %v13 = trunc <8 x i32> %v12 to <8 x i16> %v14 = extractelement <8 x i16> %v13, i32 7 - store i16 %v14, i16* undef, align 2 - unreachable + store i16 %v14, i16* %a0, align 2 + ret void } ; CHECK-LABEL: sammy: ; CHECK: vmem -define void @sammy() #1 { +define void @sammy(i16* %a0) #1 { b0: - %v1 = load i16, i16* undef, align 2 + %v1 = load i16, i16* %a0, align 2 %v2 = insertelement <16 x i16> undef, i16 %v1, i32 14 %v3 = insertelement <16 x i16> %v2, i16 undef, i32 15 %v4 = sext <16 x i16> %v3 to <16 x i32> @@ -46,8 +46,8 @@ b0: %v12 = sub nsw <16 x i32> zeroinitializer, %v11 %v13 = trunc <16 x i32> %v12 to <16 x i16> %v14 = extractelement <16 x i16> %v13, i32 15 - store i16 %v14, i16* undef, align 2 - unreachable + store i16 %v14, i16* %a0, align 2 + ret void } attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll index 72fdfad5529..269bfe6b9f5 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-shift-byte.ll @@ -6,7 +6,7 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +define void @fred(<64 x i8>* %a0) #0 { b0: br label %b1 @@ -17,7 +17,7 @@ b1: ; preds = %b9, %b0 %v5 = trunc 
<64 x i32> %v4 to <64 x i8> %v6 = xor <64 x i8> %v5, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128> %v7 = sub <64 x i8> zeroinitializer, %v6 - store <64 x i8> %v7, <64 x i8>* undef, align 64 + store <64 x i8> %v7, <64 x i8>* %a0, align 64 br i1 false, label %b8, label %b9 b8: ; preds = %b1 diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll index 885612d4705..cc10c378f27 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-truncate.ll @@ -6,13 +6,14 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -; Function Attrs: norecurse nounwind +@g0 = global <16 x i16> zeroinitializer, align 2 + define void @fred(<16 x i32> %a0, <16 x i32> %a1) #0 { b0: %v0 = icmp eq <16 x i32> %a0, %a1 %v1 = select <16 x i1> %v0, <16 x i32> %a0, <16 x i32> zeroinitializer %v2 = trunc <16 x i32> %v1 to <16 x i16> - store <16 x i16> %v2, <16 x i16>* undef, align 2 + store <16 x i16> %v2, <16 x i16>* @g0, align 2 ret void } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll index 16ecb087401..1bb1fe34076 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-vec-ext.ll @@ -6,23 +6,23 @@ target triple = "hexagon" ; CHECK-LABEL: danny: ; CHECK: vunpack ; CHECK-NOT: vinsert -define void @danny(<16 x i16>* %a0) local_unnamed_addr #0 { +define void @danny(<16 x i16>* %a0, <16 x i32>* %a1) #0 { b2: %v16 = load <16 x i16>, <16 x i16>* %a0, align 128 %v17 = sext <16 x i16> %v16 to <16 x i32> - store <16 x i32> %v17, <16 x i32>* undef, align 128 - unreachable + store <16 x i32> %v17, <16 x i32>* %a1, align 128 + ret void } ; CHECK-LABEL: sammy: ; CHECK: vunpack ; CHECK-NOT: vinsert -define void @sammy(<32 x i16>* %a0) local_unnamed_addr #1 { +define void @sammy(<32 x i16>* %a0, <32 x i32>* %a1) #1 { b2: %v16 = load <32 x i16>, <32 x i16>* %a0, align 128 %v17 = sext <32 x i16> %v16 to <32 x i32> - store <32 x i32> %v17, <32 x i32>* undef, align 128 - unreachable + store <32 x i32> %v17, <32 x i32>* %a1, align 128 + ret void } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll index db226b7a3ef..359b8045e92 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-vsplat-pair.ll @@ -6,16 +6,16 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048" target triple = "hexagon" -define void @fred() #0 { +define void @fred(<64 x i8>* %a0) #0 { b0: - %v1 = load <64 x i8>, <64 x i8>* undef, align 8 + %v1 = load <64 x i8>, <64 x i8>* %a0, align 8 %v2 = zext <64 x i8> %v1 to <64 x i32> %v3 = add 
nuw nsw <64 x i32> %v2, zeroinitializer %v4 = icmp ugt <64 x i32> %v3, <i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254, i32 254> %v5 = zext <64 x i1> %v4 to <64 x i32> %v6 = add nuw nsw <64 x i32> %v3, %v5 %v7 = trunc <64 x i32> %v6 to <64 x i8> - store <64 x i8> %v7, <64 x i8>* undef, align 8 + store <64 x i8> %v7, <64 x i8>* %a0, align 8 ret void } diff --git a/llvm/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll b/llvm/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll index 906a877b91e..9a10e60d445 100644 --- a/llvm/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll +++ b/llvm/test/CodeGen/Hexagon/avoid-predspill-calleesaved.ll @@ -1,7 +1,7 @@ ; Check that a callee-saved register will be saved correctly if ; the predicate-to-GPR spilling code uses it. ; -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; ; We expect to spill p0 into a general-purpose register and keep it there, ; without adding an extra spill of that register. diff --git a/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll b/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll index 4e9ad7899d7..cb1c8fdce9e 100644 --- a/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll +++ b/llvm/test/CodeGen/Hexagon/bit-bitsplit-at.ll @@ -7,23 +7,23 @@ target triple = "hexagon" -define hidden fastcc i32 @fred(i32 %a0) unnamed_addr #0 { +define fastcc i32 @fred(i32 %a0, i8* %a1, i1 %a2, i1 %a3) #0 { b1: %v2 = lshr i32 %a0, 16 %v3 = trunc i32 %v2 to i8 - br i1 undef, label %b6, label %b4 + br i1 %a2, label %b6, label %b4 b4: ; preds = %b1 %v5 = and i32 %a0, 65535 - br i1 undef, label %b8, label %b9 + br i1 %a3, label %b8, label %b9 b6: ; preds = %b1 %v7 = and i32 %a0, 65535 br label %b9 b8: ; preds = %b4 - store i8 %v3, i8* undef, align 2 - unreachable + store i8 %v3, i8* %a1, align 2 + ret i32 1 b9: ; preds = %b6, %b4 %v10 = phi i32 [ %v7, %b6 ], [ %v5, %b4 ] diff --git a/llvm/test/CodeGen/Hexagon/bit-extract-off.ll b/llvm/test/CodeGen/Hexagon/bit-extract-off.ll index 4086ca34bbb..d696800671a 100644 --- a/llvm/test/CodeGen/Hexagon/bit-extract-off.ll +++ b/llvm/test/CodeGen/Hexagon/bit-extract-off.ll @@ -7,13 +7,15 @@ target triple = "hexagon" +@g0 = global double zeroinitializer, align 8 + define hidden i32 @fred([101 x double]* %a0, i32 %a1, i32* %a2, i32* %a3) #0 { b4: br label %b5 b5: ; preds = %b5, %b4 %v6 = call double @fabs(double undef) #1 - store double %v6, double* undef, align 8 + store double %v6, double* @g0, align 8 br label %b5 } diff --git a/llvm/test/CodeGen/Hexagon/bit-has.ll b/llvm/test/CodeGen/Hexagon/bit-has.ll index 5bb0f2f60b0..f0b2ae15392 100644 --- a/llvm/test/CodeGen/Hexagon/bit-has.ll +++ b/llvm/test/CodeGen/Hexagon/bit-has.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; This used to crash. Check for some sane output. 
diff --git a/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll b/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll index 584cbca1ff4..21846c90f5b 100644 --- a/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll +++ b/llvm/test/CodeGen/Hexagon/bitconvert-vector.ll @@ -11,15 +11,15 @@ declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #0 declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #0 declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #0 -define void @fred() #1 { +define void @fred(<64 x i16>* %a0, <32 x i32>* %a1) #1 { entry: %t0 = bitcast <64 x i16> zeroinitializer to <32 x i32> %t1 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %t0, <32 x i32> undef, i32 2) %t2 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> %t1, i32 -2) %t3 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %t2) - store <64 x i16> zeroinitializer, <64 x i16>* undef, align 128 - store <32 x i32> %t3, <32 x i32>* undef, align 128 - unreachable + store <64 x i16> zeroinitializer, <64 x i16>* %a0, align 128 + store <32 x i32> %t3, <32 x i32>* %a1, align 128 + ret void } diff --git a/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll b/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll index 7efc38f15b3..f79f79e94a3 100644 --- a/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll +++ b/llvm/test/CodeGen/Hexagon/build-vector-shuffle.ll @@ -4,14 +4,14 @@ target triple = "hexagon" -define void @hex_interleaved.s0.__outermost() local_unnamed_addr #0 { +define void @f0(<16 x i32>* %a0) #0 { entry: - %0 = icmp eq i32 undef, 0 - %sel2 = select i1 %0, <32 x i16> undef, <32 x i16> zeroinitializer - %1 = bitcast <32 x i16> %sel2 to <16 x i32> - %2 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %1) - store <16 x i32> %2, <16 x i32>* undef, align 2 - unreachable + %v0 = icmp eq i32 undef, 0 + %v1 = select i1 %v0, <32 x i16> undef, <32 x i16> zeroinitializer + %v2 = bitcast <32 x i16> %v1 to <16 x i32> + %v3 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %v2) + store <16 x i32> %v3, <16 x i32>* %a0, align 2 + ret void } ; Function Attrs: nounwind readnone diff --git a/llvm/test/CodeGen/Hexagon/callR_noreturn.ll b/llvm/test/CodeGen/Hexagon/callR_noreturn.ll index 6350c6365a7..d78b1ab33f1 100644 --- a/llvm/test/CodeGen/Hexagon/callR_noreturn.ll +++ b/llvm/test/CodeGen/Hexagon/callR_noreturn.ll @@ -5,18 +5,17 @@ %s.1 = type { [1 x %s.2], i32, [4 x i8] } %s.2 = type { [16 x i32] } -; Function Attrs: noreturn nounwind -define hidden void @f0() #0 section ".text.compat" { +define hidden void @f0(void (%s.0*)* %a0) #0 { b0: br i1 undef, label %b2, label %b1 b1: ; preds = %b0 - unreachable + ret void b2: ; preds = %b0 - call void undef(%s.0* undef) #1 + call void %a0(%s.0* null) #1 unreachable } - + attributes #0 = { noreturn nounwind } attributes #1 = { noreturn } diff --git a/llvm/test/CodeGen/Hexagon/cext-check.ll b/llvm/test/CodeGen/Hexagon/cext-check.ll index 597136d97b0..38dfa9ca035 100644 --- a/llvm/test/CodeGen/Hexagon/cext-check.ll +++ b/llvm/test/CodeGen/Hexagon/cext-check.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -hexagon-eif=0 -ifcvt-limit=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-eif=0 -ifcvt-limit=0 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check that we constant extended instructions only when necessary. 
define i32 @cext_test1(i32* %a) nounwind { diff --git a/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll b/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll index 2d65a5c5848..5ade8709068 100644 --- a/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll +++ b/llvm/test/CodeGen/Hexagon/cfgopt-fall-through.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; Check for some sane output. This test used to crash. diff --git a/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll b/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll index 054aac965b2..4c266a68c24 100644 --- a/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll +++ b/llvm/test/CodeGen/Hexagon/convert_const_i1_to_i8.ll @@ -2,12 +2,12 @@ ; CHECK-NOT: .space {{[0-9][0-9][0-9][0-9]}} ; CHECK: q{{[0-3]}} = vand(v{{[0-9]*}},r{{[0-9]*}}) -define void @convert_const_i1_to_i8() #0 { +define void @convert_const_i1_to_i8(<32 x i32>* %a0) #0 { entry: - %v0 = load <32 x i32>, <32 x i32>* undef, align 128 + %v0 = load <32 x i32>, <32 x i32>* %a0, align 128 %v1 = tail call <32 x i32> @llvm.hexagon.V6.vrdelta.128B(<32 x i32> %v0, <32 x i32> undef) %v2 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, 
i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, 
i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 
false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <32 x i32> undef, <32 x i32> %v1) - store <32 x i32> %v2, <32 x i32>* null, align 128 + store <32 x i32> %v2, <32 x i32>* %a0, align 128 ret void } diff --git a/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll b/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll index 0a126c5c58b..9a4d4322b69 100644 --- a/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll +++ b/llvm/test/CodeGen/Hexagon/copy-to-combine-dbg.ll @@ -4,54 +4,47 @@ target triple = "hexagon" -define i32 @fred(i32 %p) local_unnamed_addr #0 !dbg !6 { -entry: - br label %cond.end - -cond.end: ; preds = %entry - br i1 undef, label %cond.false.i, label %for.body.lr.ph.i - -for.body.lr.ph.i: ; preds = %cond.end - tail call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !10, metadata !12) #0, !dbg !13 - br label %for.body.i - -cond.false.i: ; preds = %cond.end - unreachable - -for.body.i: ; preds = %for.inc.i, %for.body.lr.ph.i - %inc.sink37.i = phi i32 [ 0, %for.body.lr.ph.i ], [ %inc.i, %for.inc.i ] - %call.i = tail call i8* undef(i32 12, i8* undef) #0 - br label %for.inc.i - -for.inc.i: ; preds = %for.body.i - %inc.i = add nuw i32 %inc.sink37.i, 1 - %cmp1.i = icmp ult i32 %inc.i, %p - br i1 %cmp1.i, label %for.body.i, label %PQ_AllocMem.exit.loopexit - -PQ_AllocMem.exit.loopexit: ; preds = %for.inc.i - unreachable +; Function Attrs: nounwind +define i32 @f0(i32 %a0, i8* (i32, i8*)* %a1) local_unnamed_addr #0 !dbg !5 { +b0: + br label %b1 + +b1: ; preds = %b0 + call void @llvm.dbg.value(metadata i32 0, metadata !8, metadata !DIExpression()), !dbg !10 + br label %b2 + +b2: ; preds = %b3, %b1 + %v0 = phi i32 [ 0, %b1 ], [ %v2, %b3 ] + %v1 = tail call i8* %a1(i32 12, i8* null) #0 + br label %b3 + +b3: ; preds = %b2 + %v2 = add nuw i32 %v0, 1 + %v3 = icmp ult i32 %v2, %a0 + br i1 %v3, label %b2, label %b4 + +b4: ; preds = %b3 + ret i32 0 } -declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1 +; Function Attrs: nounwind readnone speculatable +declare void @llvm.dbg.value(metadata, metadata, metadata) #1 attributes #0 = { nounwind } -attributes #1 = { nounwind readnone } +attributes #1 = { nounwind readnone speculatable } !llvm.dbg.cu = !{!0} !llvm.module.flags = !{!3, !4} -!llvm.ident = !{!5} -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 37afcb099ac2b001f4c826da7ca1d077b67a508c) (http://llvm.org/git/llvm.git 5887f1c75b3ba216850c834b186efdd3e54b7d4f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2) +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (http://llvm.org/git/clang.git 37afcb099ac2b001f4c826da7ca1d077b67a508c) (http://llvm.org/git/llvm.git 5887f1c75b3ba216850c834b186efdd3e54b7d4f)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) !1 = !DIFile(filename: "file.c", directory: "/") !2 = !{} !3 = !{i32 2, !"Dwarf Version", i32 4} !4 = !{i32 2, !"Debug Info Version", 
i32 3} -!5 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 37afcb099ac2b001f4c826da7ca1d077b67a508c) (http://llvm.org/git/llvm.git 5887f1c75b3ba216850c834b186efdd3e54b7d4f)"} -!6 = distinct !DISubprogram(name: "fred", scope: !1, file: !1, line: 116, type: !7, isLocal: false, isDefinition: true, scopeLine: 121, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !9) -!7 = !DISubroutineType(types: !2) -!8 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed) -!9 = !{!10} -!10 = !DILocalVariable(name: "Count", scope: !6, file: !1, line: 1, type: !8) -!11 = distinct !DILocation(line: 1, column: 1, scope: !6) -!12 = !DIExpression() -!13 = !DILocation(line: 1, column: 1, scope: !6, inlinedAt: !11) +!5 = distinct !DISubprogram(name: "fred", scope: !1, file: !1, line: 116, type: !6, isLocal: false, isDefinition: true, scopeLine: 121, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !7) +!6 = !DISubroutineType(types: !2) +!7 = !{!8} +!8 = !DILocalVariable(name: "Count", scope: !5, file: !1, line: 1, type: !9) +!9 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed) +!10 = !DILocation(line: 1, column: 1, scope: !5, inlinedAt: !11) +!11 = distinct !DILocation(line: 1, column: 1, scope: !5) diff --git a/llvm/test/CodeGen/Hexagon/dealloc-store.ll b/llvm/test/CodeGen/Hexagon/dealloc-store.ll index 9ec93382263..0f23aedbd73 100644 --- a/llvm/test/CodeGen/Hexagon/dealloc-store.ll +++ b/llvm/test/CodeGen/Hexagon/dealloc-store.ll @@ -22,7 +22,7 @@ target triple = "hexagon" define void @f0(%s.6* %a0, i8 zeroext %a1) { ; look for a dealloc_return in a packet with nothing else. ; -; CHECK: if (p{{[0-3]}}) memw( +; CHECK: memw(r1+#0) = r0 ; CHECK: } ; CHECK: { ; CHECK-NEXT: dealloc_return diff --git a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll index 2aa2a7580bc..937877ea285 100644 --- a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll +++ b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll @@ -1,68 +1,17 @@ -; RUN: llc -O2 -march=hexagon -hexagon-eif=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -O2 < %s | FileCheck %s -; Make sure we are not rotating registers at O2. 
; CHECK-NOT: p1 = -; CHECK-NOT: p2 = -target triple = "hexagon-unknown-linux-gnu" - -; Function Attrs: nounwind readnone -define i32 @f0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5) #0 { +define i32 @f0(i32 %a0, i32 %a1) #0 { b0: %v0 = icmp slt i32 %a0, %a1 br i1 %v0, label %b1, label %b2 -b1: ; preds = %b0 - %v1 = mul nsw i32 %a1, %a0 - br label %b2 - -b2: ; preds = %b1, %b0 - %v2 = phi i32 [ %v1, %b1 ], [ 0, %b0 ] - %v3 = icmp sgt i32 %a0, %a1 - br i1 %v3, label %b3, label %b4 - -b3: ; preds = %b2 - %v4 = mul nsw i32 %a2, %a1 - %v5 = add nsw i32 %v4, %a0 - %v6 = add nsw i32 %v5, %a3 - %v7 = add nsw i32 %v6, %v2 - br label %b4 - -b4: ; preds = %b3, %b2 - %v8 = phi i32 [ %v7, %b3 ], [ %v2, %b2 ] - %v9 = icmp sgt i32 %a2, %a3 - br i1 %v9, label %b5, label %b6 - -b5: ; preds = %b4 - %v10 = mul nsw i32 %a3, %a2 - %v11 = add nsw i32 %v8, %v10 - br label %b6 +b1: + ret i32 0 -b6: ; preds = %b5, %b4 - %v12 = phi i32 [ %v11, %b5 ], [ %v8, %b4 ] - %v13 = icmp sgt i32 %a3, %a2 - br i1 %v13, label %b7, label %b8 - -b7: ; preds = %b6 - %v14 = sdiv i32 %a3, 2 - %v15 = mul nsw i32 %v14, %a0 - %v16 = add nsw i32 %v15, %v12 - br label %b8 - -b8: ; preds = %b7, %b6 - %v17 = phi i32 [ %v16, %b7 ], [ %v12, %b6 ] - %v18 = icmp slt i32 %a4, %a5 - br i1 %v18, label %b9, label %b10 - -b9: ; preds = %b8 - %v19 = mul i32 %a4, %a3 - %v20 = mul i32 %v19, %a5 - %v21 = add nsw i32 %v17, %v20 - br label %b10 - -b10: ; preds = %b9, %b8 - %v22 = phi i32 [ %v21, %b9 ], [ %v17, %b8 ] - ret i32 %v22 +b2: + %v1 = icmp slt i32 %a1, 100 + %v2 = select i1 %v1, i32 123, i32 321 + ret i32 %v2 } - -attributes #0 = { nounwind readnone "target-cpu"="hexagonv55" } diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll index 350b0edec85..222c70112ab 100644 --- a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll +++ b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-bad.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; Check for some output other than crashing. diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll index dbcba1aa7d0..e9150c6b93b 100644 --- a/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll +++ b/llvm/test/CodeGen/Hexagon/expand-condsets-dead-pred.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; Check for some output (as opposed to a crash). diff --git a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll index 5ac0f59bd2d..fa66db24fea 100644 --- a/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll +++ b/llvm/test/CodeGen/Hexagon/expand-vstorerw-undef.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; After register allocation it is possible to have a spill of a register ; that is only partially defined. 
That in itself it fine, but creates a diff --git a/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll b/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll index a33bee0e50b..175318d9967 100644 --- a/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll +++ b/llvm/test/CodeGen/Hexagon/ignore-terminal-mbb.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; CHECK-NOT: if{{.*}}jump{{.*}}-1 ; CHECK: memw diff --git a/llvm/test/CodeGen/Hexagon/indirect-br.ll b/llvm/test/CodeGen/Hexagon/indirect-br.ll index 188eebff5c7..e8bab5f2473 100644 --- a/llvm/test/CodeGen/Hexagon/indirect-br.ll +++ b/llvm/test/CodeGen/Hexagon/indirect-br.ll @@ -1,14 +1,14 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s -;CHECK: jumpr r{{[0-9]+}} +; CHECK: jumpr r{{[0-9]+}} define i32 @check_indirect_br(i8* %target) nounwind { entry: - indirectbr i8* %target, [label %test_label] + indirectbr i8* %target, [label %test_label] test_label: - br label %ret + br label %ret ret: - ret i32 -1 + ret i32 -1 } diff --git a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll index c479b949c6f..3c56db4d90d 100644 --- a/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll +++ b/llvm/test/CodeGen/Hexagon/isel-global-offset-alignment.ll @@ -27,7 +27,7 @@ b3: ; preds = %b1, %b0 %v8 = getelementptr i16, i16* %v7, i32 88 %v9 = bitcast i16* %v8 to <8 x i16>* store <8 x i16> zeroinitializer, <8 x i16>* %v9, align 8 - unreachable + ret void } attributes #0 = { norecurse nounwind } diff --git a/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll b/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll index 8201c738d33..42098f37f3a 100644 --- a/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll +++ b/llvm/test/CodeGen/Hexagon/isel-setcc-i1.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check that this compiles successfully. ; CHECK: if (p0) diff --git a/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll b/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll index c6176aa1fe5..33a9b0ea16a 100644 --- a/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll +++ b/llvm/test/CodeGen/Hexagon/isel-simplify-crash.ll @@ -22,7 +22,7 @@ b5: ; preds = %b1 ret void b11: ; preds = %b1 - unreachable + ret void } attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" } diff --git a/llvm/test/CodeGen/Hexagon/long-calls.ll b/llvm/test/CodeGen/Hexagon/long-calls.ll index 9f9a527a542..628362783c9 100644 --- a/llvm/test/CodeGen/Hexagon/long-calls.ll +++ b/llvm/test/CodeGen/Hexagon/long-calls.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -enable-save-restore-long < %s | FileCheck %s +; RUN: llc -march=hexagon -enable-save-restore-long -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Check that the -long-calls feature is supported by the backend. 
@@ -7,7 +7,7 @@ define i64 @test_longcall(i32 %x, i32 %y) #0 { entry: %add = add nsw i32 %x, 5 - %call = tail call i64 @foo(i32 %add) #6 + %call = tail call i64 @foo(i32 %add) #1 %conv = sext i32 %y to i64 %add1 = add nsw i64 %call, %conv ret i64 %add1 @@ -17,7 +17,7 @@ entry: define i64 @test_longtailcall(i32 %x, i32 %y) #1 { entry: %add = add nsw i32 %x, 5 - %call = tail call i64 @foo(i32 %add) #6 + %call = tail call i64 @foo(i32 %add) #1 ret i64 %call } @@ -25,7 +25,7 @@ entry: define i64 @test_longnoret(i32 %x, i32 %y) #2 { entry: %add = add nsw i32 %x, 5 - %0 = tail call i64 @bar(i32 %add) #7 + %0 = tail call i64 @bar(i32 %add) #6 unreachable } @@ -36,7 +36,7 @@ entry: define i64 @test_shortcall(i32 %x, i32 %y) #3 { entry: %add = add nsw i32 %x, 5 - %call = tail call i64 @foo(i32 %add) #6 + %call = tail call i64 @foo(i32 %add) #1 %conv = sext i32 %y to i64 %add1 = add nsw i64 %call, %conv ret i64 %add1 @@ -46,7 +46,7 @@ entry: define i64 @test_shorttailcall(i32 %x, i32 %y) #4 { entry: %add = add nsw i32 %x, 5 - %call = tail call i64 @foo(i32 %add) #6 + %call = tail call i64 @foo(i32 %add) #1 ret i64 %call } @@ -54,12 +54,12 @@ entry: define i64 @test_shortnoret(i32 %x, i32 %y) #5 { entry: %add = add nsw i32 %x, 5 - %0 = tail call i64 @bar(i32 %add) #7 + %0 = tail call i64 @bar(i32 %add) #6 unreachable } -declare i64 @foo(i32) #6 -declare i64 @bar(i32) #7 +declare i64 @foo(i32) #1 +declare i64 @bar(i32) #6 attributes #0 = { minsize nounwind "target-cpu"="hexagonv60" "target-features"="+long-calls" } attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+long-calls" } @@ -69,5 +69,4 @@ attributes #3 = { minsize nounwind "target-cpu"="hexagonv60" "target-features"=" attributes #4 = { nounwind "target-cpu"="hexagonv60" "target-features"="-long-calls" } attributes #5 = { noreturn nounwind "target-cpu"="hexagonv60" "target-features"="-long-calls" } -attributes #6 = { noreturn "target-cpu"="hexagonv60" } -attributes #7 = { noreturn nounwind "target-cpu"="hexagonv60" } +attributes #6 = { noreturn nounwind "target-cpu"="hexagonv60" } diff --git a/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll b/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll index 09ca465c671..43b8119f76b 100644 --- a/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll +++ b/llvm/test/CodeGen/Hexagon/lower-extract-subvector.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -O3 < %s | FileCheck %s +; RUN: llc -march=hexagon < %s | FileCheck %s ; This test checks if we custom lower extract_subvector. If we cannot ; custom lower extract_subvector this test makes the compiler crash. 
@@ -6,41 +6,15 @@ ; CHECK: vmem target triple = "hexagon-unknown--elf" -; Function Attrs: nounwind -define void @__processed() #0 { -entry: - br label %"for matrix.s0.y" - -"for matrix.s0.y": ; preds = %"for matrix.s0.y", %entry - br i1 undef, label %"produce processed", label %"for matrix.s0.y" - -"produce processed": ; preds = %"for matrix.s0.y" - br i1 undef, label %"for processed.s0.ty.ty.preheader", label %"consume processed" - -"for processed.s0.ty.ty.preheader": ; preds = %"produce processed" - br i1 undef, label %"for denoised.s0.y.preheader", label %"consume denoised" - -"for denoised.s0.y.preheader": ; preds = %"for processed.s0.ty.ty.preheader" - unreachable - -"consume denoised": ; preds = %"for processed.s0.ty.ty.preheader" - br i1 undef, label %"consume deinterleaved", label %if.then.i164 - -if.then.i164: ; preds = %"consume denoised" - unreachable - -"consume deinterleaved": ; preds = %"consume denoised" - %0 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> undef, i32 -2) - %1 = bitcast <64 x i32> %0 to <128 x i16> - %2 = shufflevector <128 x i16> %1, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> - store <64 x i16> %2, <64 x i16>* undef, align 128 - unreachable - -"consume processed": ; preds = %"produce processed" +define void @f0(<64 x i16>* %a0) #0 { +b0: + %v0 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> undef, i32 -2) + %v1 = bitcast <64 x i32> %v0 to <128 x i16> + %v2 = shufflevector <128 x i16> %v1, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> + store <64 x i16> %v2, <64 x i16>* %a0, align 128 ret void } -; Function Attrs: nounwind readnone declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #1 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length128b" } diff --git a/llvm/test/CodeGen/Hexagon/mux-undef.ll b/llvm/test/CodeGen/Hexagon/mux-undef.ll index 3780a329b1e..6b3d93ea49d 100644 --- a/llvm/test/CodeGen/Hexagon/mux-undef.ll +++ b/llvm/test/CodeGen/Hexagon/mux-undef.ll @@ -1,18 +1,18 @@ ; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s ; ; Make sure this test compiles successfully. 
-; CHECK: jumpr r31 +; CHECK: call foo target triple = "hexagon--elf" ; Function Attrs: nounwind -define i32 @fred() #0 { +define i32 @fred(i1 %a0) #0 { b0: call void @foo() #0 br label %b1 b1: ; preds = %b0 - br i1 undef, label %b2, label %b3 + br i1 %a0, label %b2, label %b3 b2: ; preds = %b1 br label %b3 diff --git a/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll b/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll index 39f32fb2f9d..e9a633a494c 100644 --- a/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll +++ b/llvm/test/CodeGen/Hexagon/newvalueSameReg.ll @@ -1,63 +1,63 @@ -; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-expand-condsets=0 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; ; Expand-condsets eliminates the "mux" instruction, which is what this ; testcase is checking. -%struct._Dnk_filet.1 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32*, [2 x i32], i8*, i8*, i8*, %struct._Mbstatet.0, i8*, [8 x i8], i8 } -%struct._Mbstatet.0 = type { i32, i16, i16 } - -@_Stdout = external global %struct._Dnk_filet.1 -@.str = external unnamed_addr constant [23 x i8], align 8 - ; Test that we don't generate a new value compare if the operands are ; the same register. ; CHECK-NOT: cmp.eq([[REG0:(r[0-9]+)]].new,[[REG0]]) ; CHECK: cmp.eq([[REG1:(r[0-9]+)]],[[REG1]]) +%s.0 = type { i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i8*, i32*, [2 x i32], i8*, i8*, i8*, %s.1, i8*, [8 x i8], i8 } +%s.1 = type { i32, i16, i16 } + +@g0 = external global %s.0 +@g1 = external unnamed_addr constant [23 x i8], align 8 + ; Function Attrs: nounwind -declare void @fprintf(%struct._Dnk_filet.1* nocapture, i8* nocapture readonly, ...) #1 - -define void @main() #0 { -entry: - %0 = load i32*, i32** undef, align 4 - %1 = load i32, i32* undef, align 4 - br i1 undef, label %if.end, label %_ZNSt6vectorIbSaIbEE3endEv.exit - -_ZNSt6vectorIbSaIbEE3endEv.exit: - %2 = icmp slt i32 %1, 0 - %sub5.i.i.i = lshr i32 %1, 5 - %add619.i.i.i = add i32 %sub5.i.i.i, -134217728 - %sub5.i.pn.i.i = select i1 %2, i32 %add619.i.i.i, i32 %sub5.i.i.i - %storemerge2.i.i = getelementptr inbounds i32, i32* %0, i32 %sub5.i.pn.i.i - %cmp.i.i = icmp ult i32* %storemerge2.i.i, %0 - %.mux = select i1 %cmp.i.i, i32 0, i32 1 - br i1 undef, label %_ZNSt6vectorIbSaIbEE3endEv.exit57, label %if.end - -_ZNSt6vectorIbSaIbEE3endEv.exit57: - %3 = icmp slt i32 %1, 0 - %sub5.i.i.i44 = lshr i32 %1, 5 - %add619.i.i.i45 = add i32 %sub5.i.i.i44, -134217728 - %sub5.i.pn.i.i46 = select i1 %3, i32 %add619.i.i.i45, i32 %sub5.i.i.i44 - %storemerge2.i.i47 = getelementptr inbounds i32, i32* %0, i32 %sub5.i.pn.i.i46 - %cmp.i38 = icmp ult i32* %storemerge2.i.i47, %0 - %.reg2mem.sroa.0.sroa.0.0.load14.i.reload = select i1 %cmp.i38, i32 0, i32 1 - %cmp = icmp eq i32 %.mux, %.reg2mem.sroa.0.sroa.0.0.load14.i.reload - br i1 %cmp, label %if.end, label %if.then - -if.then: - call void (%struct._Dnk_filet.1*, i8*, ...) @fprintf(%struct._Dnk_filet.1* @_Stdout, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0), i32 %.mux, i32 %.reg2mem.sroa.0.sroa.0.0.load14.i.reload) #1 +declare void @f0(%s.0* nocapture, i8* nocapture readonly, ...) 
#0 + +define void @f1() #1 { +b0: + %v0 = load i32*, i32** undef, align 4 + %v1 = load i32, i32* undef, align 4 + br i1 undef, label %b4, label %b1 + +b1: ; preds = %b0 + %v2 = icmp slt i32 %v1, 0 + %v3 = lshr i32 %v1, 5 + %v4 = add i32 %v3, -134217728 + %v5 = select i1 %v2, i32 %v4, i32 %v3 + %v6 = getelementptr inbounds i32, i32* %v0, i32 %v5 + %v7 = icmp ult i32* %v6, %v0 + %v8 = select i1 %v7, i32 0, i32 1 + br i1 undef, label %b2, label %b4 + +b2: ; preds = %b1 + %v9 = icmp slt i32 %v1, 0 + %v10 = lshr i32 %v1, 5 + %v11 = add i32 %v10, -134217728 + %v12 = select i1 %v9, i32 %v11, i32 %v10 + %v13 = getelementptr inbounds i32, i32* %v0, i32 %v12 + %v14 = icmp ult i32* %v13, %v0 + %v15 = select i1 %v14, i32 0, i32 1 + %v16 = icmp eq i32 %v8, %v15 + br i1 %v16, label %b4, label %b3 + +b3: ; preds = %b2 + call void (%s.0*, i8*, ...) @f0(%s.0* @g0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @g1, i32 0, i32 0), i32 %v8, i32 %v15) #0 unreachable -if.end: - br i1 undef, label %_ZNSt6vectorIbSaIbEED2Ev.exit, label %if.then.i.i.i +b4: ; preds = %b2, %b1, %b0 + br i1 undef, label %b6, label %b5 -if.then.i.i.i: +b5: ; preds = %b4 unreachable -_ZNSt6vectorIbSaIbEED2Ev.exit: +b6: ; preds = %b4 ret void } -attributes #0 = { "target-cpu"="hexagonv5" } -attributes #1 = { nounwind "target-cpu"="hexagonv5" } +attributes #0 = { nounwind "target-cpu"="hexagonv5" } +attributes #1 = { "target-cpu"="hexagonv5" } diff --git a/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll b/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll index bb744c8aedd..e41fcff85d6 100644 --- a/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll +++ b/llvm/test/CodeGen/Hexagon/newvaluejump-kill.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -O3 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -march=hexagon -O3 -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; ; Check that this testcase compiles successfully and that a new-value jump ; has been created. 
diff --git a/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll b/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll index 836394fef0a..d3dc7e7e213 100644 --- a/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll +++ b/llvm/test/CodeGen/Hexagon/packetize-impdef-1.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; Test that the compiler doesn't assert because IMPLICIT_DEF instructions are diff --git a/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll b/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll index 4a24ea62af4..2031a71c7e6 100644 --- a/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll +++ b/llvm/test/CodeGen/Hexagon/peephole-kill-flags.ll @@ -6,22 +6,21 @@ target triple = "hexagon" ; Function Attrs: nounwind -define void @fred() #0 { -entry: - br label %for.cond +define i32 @f0(i32* %a0, i32 %a1) #0 { +b0: + br label %b1 -for.cond: ; preds = %entry - %0 = load i32, i32* undef, align 4 - %mul = mul nsw i32 2, %0 - %cmp = icmp slt i32 undef, %mul - br i1 %cmp, label %for.body, label %for.end13 +b1: ; preds = %b0 + %v0 = load i32, i32* %a0, align 4 + %v1 = mul nsw i32 2, %v0 + %v2 = icmp slt i32 %a1, %v1 + br i1 %v2, label %b2, label %b3 -for.body: ; preds = %for.cond - unreachable +b2: ; preds = %b1 + ret i32 0 -for.end13: ; preds = %for.cond - ret void +b3: ; preds = %b1 + ret i32 %v1 } attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" } - diff --git a/llvm/test/CodeGen/Hexagon/postinc-offset.ll b/llvm/test/CodeGen/Hexagon/postinc-offset.ll index 3b28001c2f3..cf2031b4e2c 100644 --- a/llvm/test/CodeGen/Hexagon/postinc-offset.ll +++ b/llvm/test/CodeGen/Hexagon/postinc-offset.ll @@ -6,36 +6,38 @@ ; CHECK-NOT: memw([[REG0]]+#0) = ; CHECK: } -define void @main() #0 { -cond.end.6: - store i32 -1, i32* undef, align 8, !tbaa !0 - br label %polly.stmt.for.body.i -if.then: +define void @f0(i32* %a0) #0 { +b0: + store i32 -1, i32* %a0, align 8, !tbaa !0 + br label %b4 + +b1: ; preds = %b3 unreachable -if.end: +b2: ; preds = %b3 ret void -polly.stmt.for.body.i24: - %0 = extractelement <2 x i32> %add.ip_vec, i32 1 - br i1 undef, label %if.end, label %if.then - -polly.stmt.for.body.i: - %add.ip_vec30 = phi <2 x i32> [ %add.ip_vec, %polly.stmt.for.body.i ], [ zeroinitializer, %cond.end.6 ] - %scevgep.phi = phi i32* [ %scevgep.inc, %polly.stmt.for.body.i ], [ undef, %cond.end.6 ] - %polly.indvar = phi i32 [ %polly.indvar_next, %polly.stmt.for.body.i ], [ 0, %cond.end.6 ] - %vector_ptr = bitcast i32* %scevgep.phi to <2 x i32>* - %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8 - %add.ip_vec = add <2 x i32> %_p_vec_full, %add.ip_vec30 - %polly.indvar_next = add nsw i32 %polly.indvar, 2 - %polly.loop_cond = icmp slt i32 %polly.indvar, 4 - %scevgep.inc = getelementptr i32, i32* %scevgep.phi, i32 2 - br i1 %polly.loop_cond, label %polly.stmt.for.body.i, label %polly.stmt.for.body.i24 +b3: ; preds = %b4 + %v0 = extractelement <2 x i32> %v6, i32 1 + br i1 undef, label %b2, label %b1 + +b4: ; preds = %b4, %b0 + %v1 = phi <2 x i32> [ %v6, %b4 ], [ zeroinitializer, %b0 ] + %v2 = phi i32* [ %v9, %b4 ], [ %a0, %b0 ] + %v3 = phi i32 [ %v7, %b4 ], [ 0, %b0 ] + %v4 = bitcast i32* %v2 to <2 x i32>* + %v5 = load <2 x i32>, <2 x i32>* %v4, align 8 + %v6 = add <2 x i32> %v5, %v1 + %v7 = add nsw i32 %v3, 2 + %v8 = icmp slt i32 %v3, 4 + %v9 = getelementptr i32, i32* %v2, i32 2 + br i1 %v8, label %b4, label %b3 } attributes #0 = { "less-precise-fpmad"="false" 
"no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } -!0 = !{!"int", !1} -!1 = !{!"omnipotent char", !2} -!2 = !{!"Simple C/C++ TBAA"} +!0 = !{!1, !1, i64 0} +!1 = !{!"int", !2} +!2 = !{!"omnipotent char", !3} +!3 = !{!"Simple C/C++ TBAA"} diff --git a/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll b/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll index 5d947e6fe45..372e45dda18 100644 --- a/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll +++ b/llvm/test/CodeGen/Hexagon/rdf-extra-livein.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -march=hexagon -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Verify that the code compiles successfully. ; CHECK: call printf diff --git a/llvm/test/CodeGen/Hexagon/redundant-branching2.ll b/llvm/test/CodeGen/Hexagon/redundant-branching2.ll index 01b9e4dc08f..36505974d62 100644 --- a/llvm/test/CodeGen/Hexagon/redundant-branching2.ll +++ b/llvm/test/CodeGen/Hexagon/redundant-branching2.ll @@ -13,7 +13,7 @@ target triple = "hexagon-unknown--elf" declare void @f0() #0 ; Function Attrs: nounwind -define void @f1(i8* nocapture readonly %a0, i32 %a1) #0 { +define void @f1(i8* %a0, i32 %a1, i8* %a2, i32* %a3) #0 { b0: br i1 undef, label %b8, label %b1 @@ -28,7 +28,7 @@ b2: ; preds = %b1 br label %b3 b3: ; preds = %b6, %b2 - %v3 = phi i8* [ undef, %b2 ], [ %v17, %b6 ] + %v3 = phi i8* [ %a2, %b2 ], [ %v17, %b6 ] %v4 = phi i8* [ %v0, %b2 ], [ null, %b6 ] %v5 = phi i32 [ 1, %b2 ], [ 0, %b6 ] br i1 %v2, label %b4, label %b5 @@ -60,12 +60,12 @@ b6: ; preds = %b5, %b4 %v23 = lshr i32 %v22, 1 %v24 = add nuw nsw i32 %v23, %v19 %v25 = add nsw i32 %v24, 0 - store i32 %v25, i32* null, align 4 + store i32 %v25, i32* %a3, align 4 %v26 = icmp eq i32 %v5, undef br i1 %v26, label %b7, label %b3 b7: ; preds = %b6 - unreachable + ret void b8: ; preds = %b1, %b0 ret void diff --git a/llvm/test/CodeGen/Hexagon/rotl-i64.ll b/llvm/test/CodeGen/Hexagon/rotl-i64.ll index 31e5d5c43bb..353c7bafb71 100644 --- a/llvm/test/CodeGen/Hexagon/rotl-i64.ll +++ b/llvm/test/CodeGen/Hexagon/rotl-i64.ll @@ -2,26 +2,17 @@ ; CHECK: rol ; Function Attrs: nounwind -define fastcc void @f0() #0 { -b0: - br label %b1 - -b1: ; preds = %b1, %b0 - br i1 undef, label %b2, label %b1 - -b2: ; preds = %b1 - br label %b3 - -b3: ; preds = %b3, %b2 - %v0 = load i64, i64* undef, align 8, !tbaa !0 +define fastcc void @f0(i64* %a0) #0 { +b0: ; preds = %b3, %b2 + %v0 = load i64, i64* %a0, align 8, !tbaa !0 %v1 = lshr i64 %v0, 8 %v2 = shl i64 %v0, 56 %v3 = or i64 %v2, %v1 %v4 = xor i64 %v3, 0 %v5 = xor i64 %v4, 0 %v6 = add i64 0, %v5 - store i64 %v6, i64* undef, align 8, !tbaa !0 - br label %b3 + store i64 %v6, i64* %a0, align 8, !tbaa !0 + ret void } attributes #0 = { nounwind "target-cpu"="hexagonv60" } diff --git a/llvm/test/CodeGen/Hexagon/signed_immediates.ll b/llvm/test/CodeGen/Hexagon/signed_immediates.ll index ad4aa259660..334dbbeff79 100644 --- a/llvm/test/CodeGen/Hexagon/signed_immediates.ll +++ b/llvm/test/CodeGen/Hexagon/signed_immediates.ll @@ -83,9 +83,9 @@ define void @foo11(i64* %a, i64 %b) { } ; s12Ext -; CHECK: if (p0.new) r0 = #-1 +; CHECK: r1 = mux(p0,#-1,r0) define i32 @foo12(i32 %a, i1 %b) { -br i1 %b, label %x, label %y + br i1 %b, label %x, label %y x: ret i32 -1 y: diff --git a/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll b/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll index a5fd4dd6672..a21196c6806 
100644 --- a/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll +++ b/llvm/test/CodeGen/Hexagon/store-widen-subreg.ll @@ -36,10 +36,10 @@ %s.27 = type { i16, i16, i32, i32, i32 } ; Function Attrs: nounwind -define hidden void @f0() local_unnamed_addr #0 { +define void @f0(i64* %a0, i1 %a1) #0 { b0: - %v0 = load i64, i64* undef, align 8 - br i1 undef, label %b1, label %b2 + %v0 = load i64, i64* %a0, align 8 + br i1 %a1, label %b1, label %b2 b1: ; preds = %b0 %v1 = trunc i64 %v0 to i32 @@ -50,10 +50,10 @@ b1: ; preds = %b0 store i8 1, i8* %v4, align 1 %v5 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 6 store i8 1, i8* %v5, align 1 - unreachable + ret void b2: ; preds = %b0 - unreachable + ret void } attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll index e0aff5cb28c..641c61d5e4b 100644 --- a/llvm/test/CodeGen/Hexagon/swp-carried-1.ll +++ b/llvm/test/CodeGen/Hexagon/swp-carried-1.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -rdf-opt=0 -disable-hexagon-misched < %s | FileCheck %s +; RUN: llc -march=hexagon -rdf-opt=0 -disable-hexagon-misched -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that we generate the correct code when a loop carried value ; is scheduled one stage earlier than it's use. The code in diff --git a/llvm/test/CodeGen/Hexagon/swp-change-deps.ll b/llvm/test/CodeGen/Hexagon/swp-change-deps.ll index cf9dc79ad69..4eebfb6d6f5 100644 --- a/llvm/test/CodeGen/Hexagon/swp-change-deps.ll +++ b/llvm/test/CodeGen/Hexagon/swp-change-deps.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that we generate the correct offsets for loads in the prolog ; after removing dependences on a post-increment instructions of the diff --git a/llvm/test/CodeGen/Hexagon/swp-check-offset.ll b/llvm/test/CodeGen/Hexagon/swp-check-offset.ll index 2654aaabe74..220ebde0f86 100644 --- a/llvm/test/CodeGen/Hexagon/swp-check-offset.ll +++ b/llvm/test/CodeGen/Hexagon/swp-check-offset.ll @@ -15,13 +15,13 @@ ; CHECK-V65: }{{[ \t]*}}:mem_noshuf ; Function Attrs: nounwind -define void @f0() #0 { +define i32 @f0(i8** %a0) #0 { b0: - br i1 undef, label %b1, label %b4 + br label %b1 b1: ; preds = %b1, %b0 %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ] - %v1 = getelementptr inbounds i8*, i8** undef, i32 %v0 + %v1 = getelementptr inbounds i8*, i8** %a0, i32 %v0 %v2 = load i8*, i8** %v1, align 4 %v3 = bitcast i8* %v2 to i32* store i32 0, i32* %v3, align 4 @@ -34,13 +34,7 @@ b1: ; preds = %b1, %b0 br i1 %v8, label %b2, label %b1 b2: ; preds = %b1 - br i1 undef, label %b3, label %b4 - -b3: ; preds = %b2 - unreachable - -b4: ; preds = %b2, %b0 - unreachable + ret i32 %v7 } attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll b/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll index 74f757f2be1..26ad82a20c4 100644 --- a/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll +++ b/llvm/test/CodeGen/Hexagon/swp-const-tc1.ll @@ -1,5 +1,5 @@ ; RUN: llc -march=hexagon -enable-pipeliner -enable-pipeliner-opt-size \ -; RUN: -verify-machineinstrs \ +; RUN: -verify-machineinstrs -hexagon-initial-cfg-cleanup=0 \ ; RUN: -enable-aa-sched-mi=false -hexagon-expand-condsets=0 \ ; RUN: < %s | FileCheck %s diff --git a/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll b/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll index c0572105dbe..7ba4286bf41 100644 --- a/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll +++ 
b/llvm/test/CodeGen/Hexagon/swp-dep-neg-offset.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -enable-pipeliner < %s | FileCheck %s +; RUN: llc -march=hexagon -enable-pipeliner -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that the code that changes the dependences does not allow ; a load with a negative offset to be overlapped with the post diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll index 7b3420ab7ef..36805c89037 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi2.ll @@ -14,9 +14,9 @@ %s.11 = type { i32, i32, i8* } ; Function Attrs: nounwind -define void @f0(%s.0* %a0) #0 { +define void @f0(%s.0* %a0, i8* %a1, i16* %a2, i16** %a3, i16** %a4) #0 { b0: - %v0 = load i8, i8* undef, align 1, !tbaa !0 + %v0 = load i8, i8* %a1, align 1, !tbaa !0 %v1 = icmp eq i8 %v0, 1 br i1 %v1, label %b1, label %b2 @@ -26,7 +26,7 @@ b0: ; CHECK: }{{[ \t]*}}:endloop0 b1: ; preds = %b1, %b0 - %v2 = phi i16* [ %v17, %b1 ], [ undef, %b0 ] + %v2 = phi i16* [ %v17, %b1 ], [ %a2, %b0 ] %v3 = phi i32 [ %v18, %b1 ], [ 0, %b0 ] %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32 10, i32 %v3 %v5 = load i8, i8* %v4, align 1, !tbaa !0 @@ -35,16 +35,16 @@ b1: ; preds = %b1, %b0 %v8 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 25, i32 10, i32 %v7 %v9 = load i8, i8* %v8, align 1, !tbaa !0 %v10 = or i16 0, %v6 - %v11 = load i8, i8* undef, align 1, !tbaa !0 + %v11 = load i8, i8* %a1, align 1, !tbaa !0 %v12 = zext i8 %v11 to i16 %v13 = shl nuw i16 %v12, 8 %v14 = or i16 %v10, %v13 %v15 = or i16 %v14, 0 %v16 = getelementptr inbounds i16, i16* %v2, i32 1 - store i16* %v16, i16** null, align 4, !tbaa !3 + store i16* %v16, i16** %a3, align 4, !tbaa !3 store i16 %v15, i16* %v2, align 2, !tbaa !5 %v17 = getelementptr inbounds i16, i16* %v2, i32 2 - store i16* %v17, i16** null, align 4, !tbaa !3 + store i16* %v17, i16** %a4, align 4, !tbaa !3 store i16 0, i16* %v16, align 2, !tbaa !5 %v18 = add nsw i32 %v3, 8 %v19 = icmp slt i32 %v18, undef diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll index 101d606a028..e85ea7654e0 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi4.ll @@ -8,16 +8,16 @@ ; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}} ; Function Attrs: nounwind -define void @f0(i32 %a0, i32* %a1, [1000 x i32]* %a2) #0 { +define void @f0(i32 %a0, i32* %a1, [1000 x i32]* %a2, i32* %a3, i32* %a4) #0 { b0: - br i1 undef, label %b1, label %b3 + br label %b1 b1: ; preds = %b1, %b0 %v0 = phi i32 [ %v8, %b1 ], [ 1, %b0 ] - %v1 = load i32, i32* null, align 4, !tbaa !0 + %v1 = load i32, i32* %a3, align 4, !tbaa !0 %v2 = getelementptr inbounds i32, i32* %a1, i32 %v0 %v3 = load i32, i32* %v2, align 4, !tbaa !0 - %v4 = load i32, i32* undef, align 4, !tbaa !0 + %v4 = load i32, i32* %a4, align 4, !tbaa !0 %v5 = mul nsw i32 %v4, %v3 %v6 = add nsw i32 %v5, %v1 %v7 = getelementptr inbounds [1000 x i32], [1000 x i32]* %a2, i32 %v0, i32 0 @@ -26,10 +26,7 @@ b1: ; preds = %b1, %b0 %v9 = icmp eq i32 %v8, %a0 br i1 %v9, label %b2, label %b1 -b2: ; preds = %b1 - unreachable - -b3: ; preds = %b0 +b2: ; preds = %b0 ret void } diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll index b71eb879fa5..cb0bde74c85 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi6.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -O2 
-debug-only=pipeliner < %s -o - 2>&1 > /dev/null | FileCheck %s +; RUN: llc -march=hexagon -O2 -debug-only=pipeliner -hexagon-initial-cfg-cleanup=0 < %s -o - 2>&1 > /dev/null | FileCheck %s ; REQUIRES: asserts ; Test that the phi in the first epilog block is getter the correct diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll index 1d2c2e689d0..a46e86b0b36 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi8.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -mno-pairing -mno-compound < %s | FileCheck %s +; RUN: llc -march=hexagon -mno-pairing -mno-compound -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that we generate the correct phi names in the epilog when the pipeliner ; schedules a phi and it's loop definition in different stages, e.g., a phi is diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll index db92a33b559..2e268d74b93 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi9.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that we generate the correct Phi name in the last couple of epilog ; blocks, when there are 3 epilog blocks. The Phi was scheduled in stage diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phis.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phis.ll index f877eec4754..1073f1c46b1 100644 --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phis.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phis.ll @@ -1,5 +1,6 @@ ; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 \ -; RUN: -pipeliner-ignore-recmii -disable-hexagon-nv-schedule -stats -o /dev/null\ +; RUN: -pipeliner-ignore-recmii -disable-hexagon-nv-schedule \ +; RUN: -hexagon-initial-cfg-cleanup=0 -stats -o /dev/null \ ; RUN: -enable-aa-sched-mi < %s 2>&1 | FileCheck %s --check-prefix=STATS ; REQUIRES: asserts ; diff --git a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll index 315f554df1a..681e7492337 100644 --- a/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll +++ b/llvm/test/CodeGen/Hexagon/swp-kernel-phi1.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -enable-pipeliner-opt-size < %s | FileCheck %s +; RUN: llc -march=hexagon -enable-pipeliner-opt-size -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that we generate the correct names for the phis in the kernel for the ; incoming values. In this case, the loop contains a phi and has another phi diff --git a/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll b/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll index 3f8abf0bc57..b95e6241956 100644 --- a/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll +++ b/llvm/test/CodeGen/Hexagon/swp-loop-carried-unknown.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; Test that the pipeliner schedules a store before the load in which there is a ; loop carried dependence. 
Previously, the loop carried dependence wasn't added diff --git a/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll b/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll index 8faddc17c87..a657f92c5d5 100644 --- a/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll +++ b/llvm/test/CodeGen/Hexagon/swp-lots-deps.ll @@ -4,7 +4,7 @@ ; STATS: 1 pipeliner - Number of loops software pipelined ; Function Attrs: nounwind -define void @f0(i32 %a0) #0 { +define i64 @f0(i32 %a0, i32* %a1) #0 { b0: %v0 = icmp slt i32 %a0, 123469 br i1 %v0, label %b1, label %b4 @@ -21,7 +21,7 @@ b2: ; preds = %b2, %b1 %v6 = phi i64 [ undef, %b1 ], [ %v11, %b2 ] %v7 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v5, i64 %v6, i64 %v6) %v8 = tail call i64 @llvm.hexagon.S2.packhl(i32 undef, i32 %v4) - %v9 = load i32, i32* undef, align 4, !tbaa !0 + %v9 = load i32, i32* %a1, align 4, !tbaa !0 %v10 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v2, i64 %v6, i64 %v8) %v11 = tail call i64 @llvm.hexagon.S2.packhl(i32 %v9, i32 undef) %v12 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %v1, i64 %v6, i64 %v11) @@ -36,7 +36,8 @@ b3: ; preds = %b2 b4: ; preds = %b3, %b0 %v16 = phi i64 [ %v10, %b3 ], [ undef, %b0 ] %v17 = phi i64 [ %v7, %b3 ], [ undef, %b0 ] - unreachable + %v18 = add i64 %v16, %v17 + ret i64 %v18 } ; Function Attrs: nounwind readnone diff --git a/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll b/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll index c7ed09df48b..38b56c1126a 100644 --- a/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll +++ b/llvm/test/CodeGen/Hexagon/swp-phi-dep.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -enable-pipeliner -pipeliner-max-stages=2 < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 -enable-pipeliner -pipeliner-max-stages=2 < %s | FileCheck %s ; Check that the pipelined code uses the proper address in the ; prolog and the kernel. 
The bug occurs when the address computation diff --git a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll index 5b58cd271a9..7371ed10a71 100644 --- a/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll +++ b/llvm/test/CodeGen/Hexagon/swp-reuse-phi-6.ll @@ -13,20 +13,20 @@ ; CHECK: }{{[ \t]*}}:endloop0 ; Function Attrs: nounwind -define void @f0(i32 %a0, i32 %a1) local_unnamed_addr #0 { +define void @f0(i32 %a0, i32 %a1, i8* %a2, <16 x i32>* %a3) #0 { b0: %v0 = shl nsw i32 %a0, 1 %v1 = sub i32 0, %v0 %v2 = sub i32 0, %a0 - %v3 = getelementptr inbounds i8, i8* undef, i32 %v1 - %v4 = getelementptr inbounds i8, i8* undef, i32 %v2 - %v5 = getelementptr inbounds i8, i8* undef, i32 %a0 - %v6 = getelementptr inbounds i8, i8* undef, i32 %v0 + %v3 = getelementptr inbounds i8, i8* %a2, i32 %v1 + %v4 = getelementptr inbounds i8, i8* %a2, i32 %v2 + %v5 = getelementptr inbounds i8, i8* %a2, i32 %a0 + %v6 = getelementptr inbounds i8, i8* %a2, i32 %v0 %v7 = getelementptr inbounds i8, i8* %v6, i32 64 %v8 = bitcast i8* %v7 to <16 x i32>* %v9 = getelementptr inbounds i8, i8* %v5, i32 64 %v10 = bitcast i8* %v9 to <16 x i32>* - %v11 = getelementptr inbounds i8, i8* undef, i32 64 + %v11 = getelementptr inbounds i8, i8* %a2, i32 64 %v12 = bitcast i8* %v11 to <16 x i32>* %v13 = getelementptr inbounds i8, i8* %v4, i32 64 %v14 = bitcast i8* %v13 to <16 x i32>* @@ -35,7 +35,7 @@ b0: br label %b1 b1: ; preds = %b1, %b0 - %v17 = phi <16 x i32>* [ %v59, %b1 ], [ undef, %b0 ] + %v17 = phi <16 x i32>* [ %v59, %b1 ], [ %a3, %b0 ] %v18 = phi <16 x i32>* [ %v34, %b1 ], [ %v8, %b0 ] %v19 = phi <16 x i32>* [ %v32, %b1 ], [ %v10, %b0 ] %v20 = phi <16 x i32>* [ %v30, %b1 ], [ %v12, %b0 ] diff --git a/llvm/test/CodeGen/Hexagon/swp-tfri.ll b/llvm/test/CodeGen/Hexagon/swp-tfri.ll index e80fc357db1..66b999e5590 100644 --- a/llvm/test/CodeGen/Hexagon/swp-tfri.ll +++ b/llvm/test/CodeGen/Hexagon/swp-tfri.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -enable-pipeliner -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS +; RUN: llc -march=hexagon -enable-pipeliner -hexagon-initial-cfg-cleanup=0 -stats -o /dev/null < %s 2>&1 | FileCheck %s --check-prefix=STATS ; REQUIRES: asserts ; Check that we handle the case when a value is first defined in the loop. 
diff --git a/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll b/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll index 148d018b01e..652a000fd7a 100644 --- a/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll +++ b/llvm/test/CodeGen/Hexagon/tail-dup-subreg-map.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s +; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; REQUIRES: asserts ; When tail-duplicating a block with PHI nodes that use subregisters, the diff --git a/llvm/test/CodeGen/Hexagon/tfr-mux-nvj.ll b/llvm/test/CodeGen/Hexagon/tfr-mux-nvj.ll index fe0baa0d60a..1bffc8e63ea 100644 --- a/llvm/test/CodeGen/Hexagon/tfr-mux-nvj.ll +++ b/llvm/test/CodeGen/Hexagon/tfr-mux-nvj.ll @@ -1,4 +1,4 @@ -; RUN: llc -march=hexagon -O2 -hexagon-expand-condsets=0 < %s | FileCheck %s +; RUN: llc -march=hexagon -O2 -hexagon-expand-condsets=0 -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s ; CHECK: mux ; CHECK: cmp{{.*\.new}} diff --git a/llvm/test/CodeGen/Hexagon/v60-cur.ll b/llvm/test/CodeGen/Hexagon/v60-cur.ll index d0ffe1d8fdd..0f3204de325 100644 --- a/llvm/test/CodeGen/Hexagon/v60-cur.ll +++ b/llvm/test/CodeGen/Hexagon/v60-cur.ll @@ -4,44 +4,45 @@ ; CHECK: v{{[0-9]*}}.cur -define void @conv3x3_i(i8* noalias nocapture readonly %iptr0, i32 %shift, i32 %width) #0 { -entry: - br i1 undef, label %for.body.lr.ph, label %for.end - -for.body.lr.ph: - br label %for.body - -for.body: - %iptr0.pn = phi i8* [ %iptr0, %for.body.lr.ph ], [ %iptr0.addr.0121, %for.body ] - %j.0115 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ] - %sline000.0114 = phi <16 x i32> [ zeroinitializer, %for.body.lr.ph ], [ %1, %for.body ] - %sline100.0113 = phi <16 x i32> [ zeroinitializer, %for.body.lr.ph ], [ zeroinitializer, %for.body ] - %iptr0.addr.0121 = getelementptr inbounds i8, i8* %iptr0.pn, i32 64 - %0 = bitcast i8* %iptr0.addr.0121 to <16 x i32>* - %1 = load <16 x i32>, <16 x i32>* %0, align 64, !tbaa !1 - %2 = load <16 x i32>, <16 x i32>* null, align 64, !tbaa !1 - %3 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %1, <16 x i32> %sline000.0114, i32 4) - %4 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> zeroinitializer, <16 x i32> %sline100.0113, i32 4) - %5 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %2, <16 x i32> zeroinitializer, i32 4) - %6 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %3, <16 x i32> %sline000.0114) - %7 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %5, <16 x i32> zeroinitializer) - %8 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %6, i32 0, i32 0) - %9 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %8, <32 x i32> zeroinitializer, i32 undef, i32 0) - %10 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %9, <32 x i32> undef, i32 undef, i32 0) - %11 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %10) - %12 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %11, <16 x i32> undef, i32 %shift) - %13 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> undef, <16 x i32> %12) - store <16 x i32> %13, <16 x i32>* undef, align 64, !tbaa !1 - %14 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> zeroinitializer, <32 x i32> %7, i32 undef, i32 1) - %15 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %14) - %16 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %15, <16 x i32> undef, i32 %shift) - %17 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %16, <16 x i32> undef) - store <16 x i32> %17, <16 
x i32>* undef, align 64, !tbaa !1 - %add = add nsw i32 %j.0115, 64 - %cmp = icmp slt i32 %add, %width - br i1 %cmp, label %for.body, label %for.end - -for.end: +; Function Attrs: nounwind +define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, <16 x i32>* %a3, <16 x i32>* %a4) #0 { +b0: + br i1 undef, label %b1, label %b3 + +b1: ; preds = %b0 + br label %b2 + +b2: ; preds = %b2, %b1 + %v0 = phi i8* [ %a0, %b1 ], [ %v4, %b2 ] + %v1 = phi i32 [ 0, %b1 ], [ %v23, %b2 ] + %v2 = phi <16 x i32> [ zeroinitializer, %b1 ], [ %v6, %b2 ] + %v3 = phi <16 x i32> [ zeroinitializer, %b1 ], [ zeroinitializer, %b2 ] + %v4 = getelementptr inbounds i8, i8* %v0, i32 64 + %v5 = bitcast i8* %v4 to <16 x i32>* + %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0 + %v7 = load <16 x i32>, <16 x i32>* %a3, align 64, !tbaa !0 + %v8 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v6, <16 x i32> %v2, i32 4) + %v9 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> zeroinitializer, <16 x i32> %v3, i32 4) + %v10 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v7, <16 x i32> zeroinitializer, i32 4) + %v11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v8, <16 x i32> %v2) + %v12 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v10, <16 x i32> zeroinitializer) + %v13 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 0, i32 0) + %v14 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v13, <32 x i32> zeroinitializer, i32 undef, i32 0) + %v15 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %v14, <32 x i32> undef, i32 undef, i32 0) + %v16 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v15) + %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v16, <16 x i32> undef, i32 %a1) + %v18 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> undef, <16 x i32> %v17) + store <16 x i32> %v18, <16 x i32>* %a3, align 64, !tbaa !0 + %v19 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> zeroinitializer, <32 x i32> %v12, i32 undef, i32 1) + %v20 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v19) + %v21 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v20, <16 x i32> undef, i32 %a1) + %v22 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v21, <16 x i32> undef) + store <16 x i32> %v22, <16 x i32>* %a4, align 64, !tbaa !0 + %v23 = add nsw i32 %v1, 64 + %v24 = icmp slt i32 %v23, %a2 + br i1 %v24, label %b2, label %b3 + +b3: ; preds = %b2, %b0 ret void } @@ -56,6 +57,6 @@ declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" } attributes #1 = { nounwind readnone } -!1 = !{!2, !2, i64 0} -!2 = !{!"omnipotent char", !3, i64 0} -!3 = !{!"Simple C/C++ TBAA"} +!0 = !{!1, !1, i64 0} +!1 = !{!"omnipotent char", !2, i64 0} +!2 = !{!"Simple C/C++ TBAA"} diff --git a/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll b/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll index 396e7138f75..de91ac33265 100644 --- a/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll +++ b/llvm/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll @@ -6,33 +6,15 @@ target triple = "hexagon-unknown--elf" ; Function Attrs: norecurse nounwind -define void @f0() #0 { -b0: - br i1 undef, label %b6, label %b1 - -b1: ; preds = %b0 - br i1 undef, label %b2, label %b6, !prof !1 - -b2: ; preds = %b1 - br label %b3 - -b3: ; preds = %b5, %b2 - br i1 undef, label %b4, label %b5, !prof 
!1
-
-b4: ; preds = %b3
-  %v0 = load <64 x i8>, <64 x i8>* undef, align 1
+define void @f0(<64 x i8>* %a0) #0 {
+b0: ; preds = %b3
+  %v0 = load <64 x i8>, <64 x i8>* %a0, align 1
   %v1 = shufflevector <64 x i8> %v0, <64 x i8> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
   %v2 = bitcast <128 x i8> %v1 to <32 x i32>
   %v3 = tail call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> undef, <32 x i32> %v2)
   %v4 = bitcast <32 x i32> %v3 to <128 x i8>
   %v5 = shufflevector <128 x i8> %v4, <128 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  store <64 x i8> %v5, <64 x i8>* undef, align 1, !tbaa !2
-  br label %b5
-
-b5: ; preds = %b4, %b3
-  br i1 undef, label %b6, label %b3
-
-b6: ; preds = %b5, %b1, %b0
+  store <64 x i8> %v5, <64 x i8>* %a0, align 1, !tbaa !2
   ret void
 }
diff --git a/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll b/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
index 0facdc33555..f33756b76e0 100644
--- a/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
+++ b/llvm/test/CodeGen/Hexagon/vassign-to-combine.ll
@@ -16,41 +16,40 @@
 declare <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32>, <32 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32>, i32) #0
-define void @foo() local_unnamed_addr #1 {
-entry:
-  %0 = load <32 x i32>, <32 x i32>* undef, align 128
-  %1 = load <32 x i32>, <32 x i32>* null, align 128
+define void @f0(<32 x i32>* %a0, <32 x i32>* %a1) local_unnamed_addr #1 {
+b0:
+  %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
+  %v1 = load <32 x i32>, <32 x i32>* %a1, align 128
   br i1 undef, label %b2, label %b1
 
-b1: ; preds = %entry
-  %2 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %0, <32 x i32> %1, i32 1)
-  %3 = tail call <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32> %2, i32 33686018) #1
-  %4 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %3) #1
-  %5 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %4)
-  %6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> %5, <32 x i32> undef) #1
-  %7 = tail call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %6, <32 x i32> undef)
-  %8 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %7) #1
-  %9 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %8) #1
-  %10 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %9, <32 x i32> undef) #1
-  store <32 x i32> %10, <32 x i32>* undef, align 128
+b1: ; preds = %b0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v0, <32 x i32> %v1, i32 1)
+  %v3 = tail call <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32> %v2, i32 33686018) #1
+  %v4 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %v3) #1
+  %v5 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v4)
+  %v6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> %v5, <32 x i32> undef) #1
+  %v7 = tail call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v6, <32 x i32> undef)
+  %v8 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v7) #1
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v8) #1
+  %v10 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v9, <32 x i32> undef) #1
+  store <32 x i32> %v10, <32 x i32>* %a0, align 128
   br label %b2
 
-b2: ; preds = %b1, %entry
-  %c2.host31.sroa.3.2.unr.ph = phi <32 x i32> [ zeroinitializer, %b1 ], [ %0, %entry ]
-  %c2.host31.sroa.0.2.unr.ph = phi <32 x i32> [ %0, %b1 ], [ %1, %entry ]
-  %11 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %c2.host31.sroa.3.2.unr.ph, <32 x i32> %c2.host31.sroa.0.2.unr.ph, i32 1)
-  %12 = tail call <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32> %11, i32 33686018) #1
-  %13 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %12) #1
-  %14 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %13)
-  %15 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> %14, <32 x i32> undef) #1
-  %16 = tail call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %15, <32 x i32> undef)
-  %17 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %16) #1
-  %18 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %17) #1
-  %19 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %18, <32 x i32> undef) #1
-  store <32 x i32> %19, <32 x i32>* undef, align 128
+b2: ; preds = %b1, %b0
+  %v11 = phi <32 x i32> [ zeroinitializer, %b1 ], [ %v0, %b0 ]
+  %v12 = phi <32 x i32> [ %v0, %b1 ], [ %v1, %b0 ]
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v11, <32 x i32> %v12, i32 1)
+  %v14 = tail call <64 x i32> @llvm.hexagon.V6.vmpyub.128B(<32 x i32> %v13, i32 33686018) #1
+  %v15 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %v14) #1
+  %v16 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v15)
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> %v16, <32 x i32> undef) #1
+  %v18 = tail call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v17, <32 x i32> undef)
+  %v19 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v18) #1
+  %v20 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v19) #1
+  %v21 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v20, <32 x i32> undef) #1
+  store <32 x i32> %v21, <32 x i32>* %a1, align 128
   ret void
 }
 
 attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length128b" }
-
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
diff --git a/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll b/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
index 9a71b2473a7..5bc65f00457 100644
--- a/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/vect-bool-isel-crash.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred() #0 {
+define void @fred(i32* %a0, i8* %a1) #0 {
 b0:
   %v1 = icmp sgt <8 x i32> undef, undef
   %v2 = extractelement <8 x i1> %v1, i32 4
@@ -18,13 +18,13 @@ b0:
   %v8 = add nsw i32 %v7, %v5
   %v9 = add nsw i32 0, %v8
   %v10 = add nsw i32 0, %v9
-  %v11 = load i32, i32* undef, align 4
+  %v11 = load i32, i32* %a0, align 4
   %v12 = mul nsw i32 %v11, %v10
   %v13 = add nsw i32 %v12, 16384
   %v14 = ashr i32 %v13, 15
   %v15 = select i1 undef, i32 %v14, i32 255
   %v16 = trunc i32 %v15 to i8
-  store i8 %v16, i8* undef, align 1
+  store i8 %v16, i8* %a1, align 1
   ret void
 }
 
diff --git a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
index 594edece546..bcec15437e9 100644
--- a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
@@ -6,7 +6,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 target triple = "hexagon"
 
-define void @fred(<8 x i16>* %a0) #0 {
+define i32 @fred(<8 x i16>* %a0) #0 {
 b0:
   switch i32 undef, label %b14 [
     i32 5, label %b2
@@ -31,10 +31,10 @@
   br i1 %v12, label %b14, label %b13
 
 b13: ; preds = %b2
-  unreachable
+  ret i32 1
 
 b14: ; preds = %b2, %b1, %b0
-  ret void
+  ret i32 0
 }
 
 attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-length64b,+hvxv60" }
diff --git a/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll b/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
index a3bed31071d..35381bd94ea 100644
--- a/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
+++ b/llvm/test/CodeGen/Hexagon/vload-postinc-sel.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-initial-cfg-cleanup=0 < %s | FileCheck %s
 ; CHECK: = vmem(r{{[0-9]+}}++#1)
 
 target triple = "hexagon-unknown--elf"
@@ -11,39 +11,33 @@
 declare <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32>, <64 x i32>) #0
 declare <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32>, <32 x i32>) #0
-define void @fred() #1 {
-entry:
-  br i1 undef, label %b1, label %call_destructor.exit
-
-b1: ; preds = %entry
-  br label %b2
-
-b2: ; preds = %b1, %b2
-  %c2.host32.sroa.3.0 = phi <128 x i8> [ %5, %b2 ], [ undef, %b1 ]
-  %sobel_halide.s0.x.x = phi i32 [ %17, %b2 ], [ 0, %b1 ]
-  %0 = add nsw i32 %sobel_halide.s0.x.x, undef
-  %1 = shl i32 %0, 7
-  %2 = add nsw i32 %1, 128
-  %3 = getelementptr inbounds i8, i8* undef, i32 %2
-  %4 = bitcast i8* %3 to <128 x i8>*
-  %5 = load <128 x i8>, <128 x i8>* %4, align 128
-  %6 = bitcast <128 x i8> %c2.host32.sroa.3.0 to <32 x i32>
-  %7 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> %6, i32 1)
-  %8 = tail call <64 x i32> @llvm.hexagon.V6.vzb.128B(<32 x i32> %7) #1
-  %9 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %8) #1
-  %10 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> %9, <64 x i32> undef) #1
-  %11 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %10)
-  %12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> undef, <32 x i32> %11) #1
-  %13 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %12, <32 x i32> undef)
-  %14 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %13) #1
-  %15 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %14) #1
-  %16 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %15, <32 x i32> undef) #1
-  store <32 x i32> %16, <32 x i32>* undef, align 128
-  %17 = add nuw nsw i32 %sobel_halide.s0.x.x, 1
-  br label %b2
-
-call_destructor.exit: ; preds = %entry
-  ret void
+define void @f0(i8* %a0, <32 x i32>* %a1) #1 {
+b0:
+  br label %b1
+
+b1: ; preds = %b2, %b1
+  %v0 = phi <128 x i8> [ %v7, %b1 ], [ undef, %b0 ]
+  %v1 = phi i32 [ %v19, %b1 ], [ 0, %b0 ]
+  %v2 = add nsw i32 %v1, undef
+  %v3 = shl i32 %v2, 7
+  %v4 = add nsw i32 %v3, 128
+  %v5 = getelementptr inbounds i8, i8* %a0, i32 %v4
+  %v6 = bitcast i8* %v5 to <128 x i8>*
+  %v7 = load <128 x i8>, <128 x i8>* %v6, align 128
+  %v8 = bitcast <128 x i8> %v0 to <32 x i32>
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> %v8, i32 1)
+  %v10 = tail call <64 x i32> @llvm.hexagon.V6.vzb.128B(<32 x i32> %v9) #1
+  %v11 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> undef, <64 x i32> %v10) #1
+  %v12 = tail call <64 x i32> @llvm.hexagon.V6.vadduhsat.dv.128B(<64 x i32> %v11, <64 x i32> undef) #1
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v12)
+  %v14 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffuh.128B(<32 x i32> undef, <32 x i32> %v13) #1
+  %v15 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v14, <32 x i32> undef)
+  %v16 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v15) #1
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v16) #1
+  %v18 = tail call <32 x i32> @llvm.hexagon.V6.vsathub.128B(<32 x i32> %v17, <32 x i32> undef) #1
+  store <32 x i32> %v18, <32 x i32>* %a1, align 128
+  %v19 = add nuw nsw i32 %v1, 1
+  br label %b1
 }
 
 declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #0