23 files changed, 294 insertions, 174 deletions
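In short: this change renames the standard dialect's integer division and remainder ops (divis becomes divi_signed, diviu becomes divi_unsigned, remis becomes remi_signed, remiu becomes remi_unsigned), and replaces the signed-only shlis op with three new shift ops (shift_left, shift_right_signed, shift_right_unsigned) that lower to llvm.shl, llvm.ashr, and llvm.lshr.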
diff --git a/mlir/bindings/python/test/test_py2and3.py b/mlir/bindings/python/test/test_py2and3.py
index 02f8f628046..2c1158f7174 100644
--- a/mlir/bindings/python/test/test_py2and3.py
+++ b/mlir/bindings/python/test/test_py2and3.py
@@ -274,7 +274,7 @@ class EdscTest:
     printWithCurrentFunctionName(str(self.module))
     # CHECK-LABEL: testDivisions
     # CHECK: floordiv 42
-    # CHECK: divis %{{.*}}, %{{.*}} : i32
+    # CHECK: divi_signed %{{.*}}, %{{.*}} : i32
 
   def testFunctionArgs(self):
     self.setUp()
diff --git a/mlir/include/mlir/Dialect/StandardOps/Ops.td b/mlir/include/mlir/Dialect/StandardOps/Ops.td
index 76c2ba57ea6..c26baf6a76e 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Ops.td
@@ -698,12 +698,12 @@ def DivFOp : FloatArithmeticOp<"divf"> {
   let summary = "floating point division operation";
 }
 
-def DivISOp : IntArithmeticOp<"divis"> {
+def SignedDivIOp : IntArithmeticOp<"divi_signed"> {
   let summary = "signed integer division operation";
   let hasFolder = 1;
 }
 
-def DivIUOp : IntArithmeticOp<"diviu"> {
+def UnsignedDivIOp : IntArithmeticOp<"divi_unsigned"> {
   let summary = "unsigned integer division operation";
   let hasFolder = 1;
 }
@@ -1002,12 +1002,12 @@ def RemFOp : FloatArithmeticOp<"remf"> {
   let summary = "floating point division remainder operation";
 }
 
-def RemISOp : IntArithmeticOp<"remis"> {
+def SignedRemIOp : IntArithmeticOp<"remi_signed"> {
   let summary = "signed integer division remainder operation";
   let hasFolder = 1;
 }
 
-def RemIUOp : IntArithmeticOp<"remiu"> {
+def UnsignedRemIOp : IntArithmeticOp<"remi_unsigned"> {
   let summary = "unsigned integer division remainder operation";
   let hasFolder = 1;
 }
@@ -1102,8 +1102,45 @@ def SignExtendIOp : Std_Op<"sexti",
   }];
 }
 
-def ShlISOp : IntArithmeticOp<"shlis"> {
-  let summary = "signed integer shift left";
+def ShiftLeftOp : IntArithmeticOp<"shift_left"> {
+  let summary = "integer left-shift";
+  let description = [{
+    The shift_left operation shifts an integer value to the left by a variable
+    amount. The low order bits are filled with zeros.
+
+      %1 = constant 5 : i8   // %1 is 0b00000101
+      %2 = constant 3 : i8
+      %3 = shift_left %1, %2 : (i8, i8) -> i8   // %3 is 0b00101000
+  }];
+}
+
+def SignedShiftRightOp : IntArithmeticOp<"shift_right_signed"> {
+  let summary = "signed integer right-shift";
+  let description = [{
+    The shift_right_signed operation shifts an integer value to the right by
+    a variable amount. The integer is interpreted as signed. The high order
+    bits in the output are filled with copies of the most-significant bit
+    of the shifted value (which means that the sign of the value is preserved).
+
+      %1 = constant 160 : i8   // %1 is 0b10100000
+      %2 = constant 3 : i8
+      %3 = shift_right_signed %1, %2 : (i8, i8) -> i8   // %3 is 0b11110100
+      %4 = constant 96 : i8    // %4 is 0b01100000
+      %5 = shift_right_signed %4, %2 : (i8, i8) -> i8   // %5 is 0b00001100
+  }];
+}
+
+def UnsignedShiftRightOp : IntArithmeticOp<"shift_right_unsigned"> {
+  let summary = "unsigned integer right-shift";
+  let description = [{
+    The shift_right_unsigned operation shifts an integer value to the right by
+    a variable amount. The integer is interpreted as unsigned. The high order
+    bits are always filled with zeros.
+
+      %1 = constant 160 : i8   // %1 is 0b10100000
+      %2 = constant 3 : i8
+      %3 = shift_right_unsigned %1, %2 : (i8, i8) -> i8   // %3 is 0b00010100
+  }];
 }
 
 def SIToFPOp : CastOp<"sitofp">, Arguments<(ins AnyType:$in)> {
diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index 9208ce8ab6d..3f613c6bfb5 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -94,7 +94,7 @@ public:
     auto rhs = visit(expr.getRHS());
     assert(lhs && rhs && "unexpected affine expr lowering failure");
 
-    Value *remainder = builder.create<RemISOp>(loc, lhs, rhs);
+    Value *remainder = builder.create<SignedRemIOp>(loc, lhs, rhs);
     Value *zeroCst = builder.create<ConstantIndexOp>(loc, 0);
     Value *isRemainderNegative =
         builder.create<CmpIOp>(loc, CmpIPredicate::slt, remainder, zeroCst);
@@ -138,7 +138,7 @@ public:
     Value *negatedDecremented = builder.create<SubIOp>(loc, noneCst, lhs);
     Value *dividend =
         builder.create<SelectOp>(loc, negative, negatedDecremented, lhs);
-    Value *quotient = builder.create<DivISOp>(loc, dividend, rhs);
+    Value *quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
     Value *correctedQuotient = builder.create<SubIOp>(loc, noneCst, quotient);
     Value *result = builder.create<SelectOp>(loc, negative, correctedQuotient,
                                              quotient);
@@ -178,7 +178,7 @@ public:
     Value *decremented = builder.create<SubIOp>(loc, lhs, oneCst);
     Value *dividend =
         builder.create<SelectOp>(loc, nonPositive, negated, decremented);
-    Value *quotient = builder.create<DivISOp>(loc, dividend, rhs);
+    Value *quotient = builder.create<SignedDivIOp>(loc, dividend, rhs);
     Value *negatedQuotient = builder.create<SubIOp>(loc, zeroCst, quotient);
     Value *incrementedQuotient = builder.create<AddIOp>(loc, quotient, oneCst);
     Value *result = builder.create<SelectOp>(loc, nonPositive, negatedQuotient,
diff --git a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
index c269dc5c45a..d663ae105f2 100644
--- a/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
+++ b/mlir/lib/Conversion/LoopsToGPU/LoopsToGPU.cpp
@@ -254,7 +254,7 @@ Optional<OpTy> LoopToGpuConverter::collectBounds(OpTy forOp,
         builder.create<SubIOp>(currentLoop.getLoc(), upperBound, lowerBound);
     Value *step = getOrCreateStep(currentLoop, builder);
     if (!isConstantOne(step))
-      range = builder.create<DivISOp>(currentLoop.getLoc(), range, step);
+      range = builder.create<SignedDivIOp>(currentLoop.getLoc(), range, step);
     dims.push_back(range);
 
     lbs.push_back(lowerBound);
diff --git a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
index ea8501b9a7e..fdc90851b64 100644
--- a/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp
@@ -814,16 +814,20 @@ struct SubIOpLowering : public BinaryOpLLVMOpLowering<SubIOp, LLVM::SubOp> {
 struct MulIOpLowering : public BinaryOpLLVMOpLowering<MulIOp, LLVM::MulOp> {
   using Super::Super;
 };
-struct DivISOpLowering : public BinaryOpLLVMOpLowering<DivISOp, LLVM::SDivOp> {
+struct SignedDivIOpLowering
+    : public BinaryOpLLVMOpLowering<SignedDivIOp, LLVM::SDivOp> {
   using Super::Super;
 };
-struct DivIUOpLowering : public BinaryOpLLVMOpLowering<DivIUOp, LLVM::UDivOp> {
+struct UnsignedDivIOpLowering
+    : public BinaryOpLLVMOpLowering<UnsignedDivIOp, LLVM::UDivOp> {
   using Super::Super;
 };
-struct RemISOpLowering : public BinaryOpLLVMOpLowering<RemISOp, LLVM::SRemOp> {
+struct SignedRemIOpLowering
+    : public BinaryOpLLVMOpLowering<SignedRemIOp, LLVM::SRemOp> {
   using Super::Super;
 };
-struct RemIUOpLowering : public BinaryOpLLVMOpLowering<RemIUOp, LLVM::URemOp> {
+struct UnsignedRemIOpLowering
+    : public BinaryOpLLVMOpLowering<UnsignedRemIOp, LLVM::URemOp> {
   using Super::Super;
 };
 struct AndOpLowering : public BinaryOpLLVMOpLowering<AndOp, LLVM::AndOp> {
@@ -862,6 +866,18 @@ struct ConstLLVMOpLowering
     : public OneToOneLLVMOpLowering<ConstantOp, LLVM::ConstantOp> {
   using Super::Super;
 };
+struct ShiftLeftOpLowering
+    : public OneToOneLLVMOpLowering<ShiftLeftOp, LLVM::ShlOp> {
+  using Super::Super;
+};
+struct SignedShiftRightOpLowering
+    : public OneToOneLLVMOpLowering<SignedShiftRightOp, LLVM::AShrOp> {
+  using Super::Super;
+};
+struct UnsignedShiftRightOpLowering
+    : public OneToOneLLVMOpLowering<UnsignedShiftRightOp, LLVM::LShrOp> {
+  using Super::Super;
+};
 
 // Check if the MemRefType `type` is supported by the lowering. We currently
 // only support memrefs with identity maps.
@@ -2082,8 +2098,6 @@ void mlir::populateStdToLLVMNonMemoryConversionPatterns(
       CosOpLowering,
       ConstLLVMOpLowering,
      DivFOpLowering,
-      DivISOpLowering,
-      DivIUOpLowering,
       ExpOpLowering,
       LogOpLowering,
       Log10OpLowering,
@@ -2097,18 +2111,23 @@ void mlir::populateStdToLLVMNonMemoryConversionPatterns(
       OrOpLowering,
       PrefetchOpLowering,
       RemFOpLowering,
-      RemISOpLowering,
-      RemIUOpLowering,
       ReturnOpLowering,
       SIToFPLowering,
       SelectOpLowering,
+      ShiftLeftOpLowering,
       SignExtendIOpLowering,
+      SignedDivIOpLowering,
+      SignedRemIOpLowering,
+      SignedShiftRightOpLowering,
       SplatOpLowering,
       SplatNdOpLowering,
       SubFOpLowering,
       SubIOpLowering,
       TanhOpLowering,
       TruncateIOpLowering,
+      UnsignedDivIOpLowering,
+      UnsignedRemIOpLowering,
+      UnsignedShiftRightOpLowering,
       XOrOpLowering,
       ZeroExtendIOpLowering>(*converter.getDialect(), converter);
   // clang-format on
diff --git a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
index e87bd4ef861..a14271efbb6 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.cpp
@@ -316,8 +316,8 @@ void populateStandardToSPIRVPatterns(MLIRContext *context,
   patterns.insert<ConstantIndexOpConversion, CmpIOpConversion,
                   IntegerOpConversion<AddIOp, spirv::IAddOp>,
                   IntegerOpConversion<MulIOp, spirv::IMulOp>,
-                  IntegerOpConversion<DivISOp, spirv::SDivOp>,
-                  IntegerOpConversion<RemISOp, spirv::SModOp>,
+                  IntegerOpConversion<SignedDivIOp, spirv::SDivOp>,
+                  IntegerOpConversion<SignedRemIOp, spirv::SModOp>,
                   IntegerOpConversion<SubIOp, spirv::ISubOp>,
                   LoadOpConversion, ReturnOpConversion, SelectOpConversion,
                   StoreOpConversion>(context, typeConverter);
diff --git a/mlir/lib/Dialect/StandardOps/Ops.cpp b/mlir/lib/Dialect/StandardOps/Ops.cpp
index d0fd1855f96..4116f6f14ae 100644
--- a/mlir/lib/Dialect/StandardOps/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/Ops.cpp
@@ -1320,10 +1320,10 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// DivISOp
+// SignedDivIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult DivISOp::fold(ArrayRef<Attribute> operands) {
+OpFoldResult SignedDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // Don't fold if it would overflow or if it requires a division by zero.
@@ -1339,10 +1339,10 @@ OpFoldResult DivISOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// DivIUOp
+// UnsignedDivIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult DivIUOp::fold(ArrayRef<Attribute> operands) {
+OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.size() == 2 && "binary operation takes two operands");
 
   // Don't fold if it would require a division by zero.
@@ -1885,11 +1885,11 @@ OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// RemISOp
+// SignedRemIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult RemISOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "remis takes two operands");
+OpFoldResult SignedRemIOp::fold(ArrayRef<Attribute> operands) {
+  assert(operands.size() == 2 && "remi_signed takes two operands");
 
   auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
   if (!rhs)
@@ -1911,11 +1911,11 @@ OpFoldResult RemISOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// RemIUOp
+// UnsignedRemIOp
 //===----------------------------------------------------------------------===//
 
-OpFoldResult RemIUOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "remiu takes two operands");
+OpFoldResult UnsignedRemIOp::fold(ArrayRef<Attribute> operands) {
+  assert(operands.size() == 2 && "remi_unsigned takes two operands");
 
   auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
   if (!rhs)
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 2956066a035..47e2dfed55e 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -390,14 +390,14 @@ ValueHandle mlir::edsc::op::operator*(ValueHandle lhs, ValueHandle rhs) {
 }
 
 ValueHandle mlir::edsc::op::operator/(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<DivISOp, DivFOp>(
+  return createBinaryHandle<SignedDivIOp, DivFOp>(
       lhs, rhs, [](AffineExpr d0, AffineExpr d1) -> AffineExpr {
         llvm_unreachable("only exprs of non-index type support operator/");
       });
 }
 
 ValueHandle mlir::edsc::op::operator%(ValueHandle lhs, ValueHandle rhs) {
-  return createBinaryHandle<RemISOp, RemFOp>(
+  return createBinaryHandle<SignedRemIOp, RemFOp>(
       lhs, rhs, [](AffineExpr d0, AffineExpr d1) { return d0 % d1; });
 }
 
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 419df8d2705..3691aee4870 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -850,7 +850,7 @@ static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
   Value *divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
   Value *divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
   Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
-  return builder.create<DivISOp>(loc, sum, divisorCst);
+  return builder.create<SignedDivIOp>(loc, sum, divisorCst);
 }
 
 // Build the IR that performs ceil division of a positive value by another
@@ -864,7 +864,7 @@ static Value *ceilDivPositive(OpBuilder &builder, Location loc, Value *dividend,
   Value *cstOne = builder.create<ConstantIndexOp>(loc, 1);
   Value *divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
   Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
-  return builder.create<DivISOp>(loc, sum, divisor);
+  return builder.create<SignedDivIOp>(loc, sum, divisor);
 }
 
 // Hoist the ops within `outer` that appear before `inner`.
@@ -1084,12 +1084,12 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
   for (unsigned i = 0, e = loops.size(); i < e; ++i) {
     unsigned idx = loops.size() - i - 1;
     if (i != 0)
-      previous =
-          builder.create<DivISOp>(loc, previous, loops[idx + 1].upperBound());
+      previous = builder.create<SignedDivIOp>(loc, previous,
+                                              loops[idx + 1].upperBound());
 
     Value *iv = (i == e - 1) ? previous
-                             : builder.create<RemISOp>(loc, previous,
-                                                       loops[idx].upperBound());
+                             : builder.create<SignedRemIOp>(
+                                   loc, previous, loops[idx].upperBound());
     replaceAllUsesInRegionWith(loops[idx].getInductionVar(), iv,
                                loops.back().region());
   }
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
index 4741c385533..73f0ab7d71b 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_3D.mlir
@@ -49,11 +49,11 @@ module {
           %8 = load %arg1[%iv4, %iv6, %iv5] : memref<?x?x?xf32>
           %9 = addf %7, %8 : f32
           %10 = subi %iv4, %iv1 : index
-          %11 = divis %10, %step1 : index
+          %11 = divi_signed %10, %step1 : index
           %12 = subi %iv5, %iv2 : index
-          %13 = divis %12, %step2 : index
+          %13 = divi_signed %12, %step2 : index
          %14 = subi %iv6, %iv3 : index
-          %15 = divis %14, %step3 : index
+          %15 = divi_signed %14, %step3 : index
           store %9, %6[%11, %13, %15] : memref<?x?x?xf32>
         }
       }
@@ -62,11 +62,11 @@ module {
      loop.for %iv8 = %iv2 to %ub2 step %step2 {
        loop.for %iv9 = %iv3 to %ub3 step %step3 {
          %16 = subi %iv7, %iv1 : index
-          %17 = divis %16, %step1 : index
+          %17 = divi_signed %16, %step1 : index
          %18 = subi %iv8, %iv2 : index
-          %19 = divis %18, %step2 : index
+          %19 = divi_signed %18, %step2 : index
          %20 = subi %iv9, %iv3 : index
-          %21 = divis %20, %step3 : index
+          %21 = divi_signed %20, %step3 : index
          %22 = load %6[%17, %19, %21] : memref<?x?x?xf32>
          %23 = load %arg2[%iv9, %iv8, %iv7] : memref<?x?x?xf32>
          %24 = mulf %22, %23 : f32
diff --git a/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir b/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
index 2753cd28188..2c5dd5c0fb2 100644
--- a/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/imperfect_4D.mlir
@@ -49,11 +49,11 @@ module {
           %8 = load %arg1[%iv5, %iv6, %iv7, %iv8] : memref<?x?x?x?xf32>
           %9 = addf %7, %8 : f32
           %10 = subi %iv5, %iv1 : index
-          %11 = divis %10, %step1 : index
+          %11 = divi_signed %10, %step1 : index
           %12 = subi %iv6, %iv2 : index
-          %13 = divis %12, %step2 : index
+          %13 = divi_signed %12, %step2 : index
           %14 = subi %iv7, %iv3 : index
-          %15 = divis %14, %step3 : index
+          %15 = divi_signed %14, %step3 : index
           store %9, %6[%11, %13, %15, %iv8] : memref<?x?x?x?xf32>
         }
       }
@@ -64,11 +64,11 @@ module {
        loop.for %iv11 = %iv3 to %ub3 step %step3 {
          loop.for %iv12 = %c0 to %3 step %step4 {
            %18 = subi %iv9, %iv1 : index
-            %19 = divis %18, %step1 : index
+            %19 = divi_signed %18, %step1 : index
            %20 = subi %iv10, %iv2 : index
-            %21 = divis %20, %step2 : index
+            %21 = divi_signed %20, %step2 : index
            %22 = subi %iv11, %iv3 : index
-            %23 = divis %22, %step3 : index
+            %23 = divi_signed %22, %step3 : index
            %26 = load %6[%19, %21, %23, %iv12] : memref<?x?x?x?xf32>
            %27 = load %arg2[%iv9, %iv10, %iv12, %iv11] : memref<?x?x?x?xf32>
            %28 = mulf %26, %27 : f32
diff --git a/mlir/test/Conversion/LoopsToGPU/linalg_to_gpu.mlir b/mlir/test/Conversion/LoopsToGPU/linalg_to_gpu.mlir
index a8a2d2d31fb..f4567fb4943 100644
--- a/mlir/test/Conversion/LoopsToGPU/linalg_to_gpu.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/linalg_to_gpu.mlir
@@ -6,10 +6,10 @@ func @foo(%arg0: memref<?xf32>, %arg1 : index) {
   %c42 = constant 42 : index
   %c3 = constant 3 : index
   // CHECK:      subi %{{.*}}, %{{.*}} : index
-  // CHECK-NEXT: %[[range_i:.*]] = divis {{.*}}, %{{.*}} : index
+  // CHECK-NEXT: %[[range_i:.*]] = divi_signed {{.*}}, %{{.*}} : index
   loop.for %i0 = %c0 to %c42 step %c3 {
     // CHECK:      subi %{{.*}}, %{{.*}} : index
-    // CHECK-NEXT: %[[range_j:.*]] = divis {{.*}}, %{{.*}} : index
+    // CHECK-NEXT: %[[range_j:.*]] = divi_signed {{.*}}, %{{.*}} : index
     loop.for %i1 = %c3 to %c42 step %arg1 {
       // CHECK:      gpu.launch
       // CHECK-SAME: blocks
diff --git a/mlir/test/Conversion/LoopsToGPU/step_positive.mlir b/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
index dd22f9986ce..6bedc92abca 100644
--- a/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
+++ b/mlir/test/Conversion/LoopsToGPU/step_positive.mlir
@@ -3,8 +3,8 @@
 // CHECK-LABEL: @step_var
 func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
   // Check that we divide by step.
-  // CHECK:  %[[range_i:.*]] = divis {{.*}}, %{{.*}}
-  // CHECK:  %[[range_j:.*]] = divis {{.*}}, %{{.*}}
+  // CHECK:  %[[range_i:.*]] = divi_signed {{.*}}, %{{.*}}
+  // CHECK:  %[[range_j:.*]] = divi_signed {{.*}}, %{{.*}}
 
   // CHECK: gpu.launch
   // CHECK-SAME: blocks(%{{[^)]*}}, %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[range_i]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index 14e46aabc56..25054bb340e 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -371,13 +371,13 @@ func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>
 // CHECK-NEXT:  %1 = llvm.fadd %arg0, %0 : !llvm<"<4 x float>">
   %1 = addf %arg0, %0 : vector<4xf32>
 // CHECK-NEXT:  %2 = llvm.sdiv %arg2, %arg2 : !llvm<"<4 x i64>">
-  %3 = divis %arg2, %arg2 : vector<4xi64>
+  %3 = divi_signed %arg2, %arg2 : vector<4xi64>
 // CHECK-NEXT:  %3 = llvm.udiv %arg2, %arg2 : !llvm<"<4 x i64>">
-  %4 = diviu %arg2, %arg2 : vector<4xi64>
+  %4 = divi_unsigned %arg2, %arg2 : vector<4xi64>
 // CHECK-NEXT:  %4 = llvm.srem %arg2, %arg2 : !llvm<"<4 x i64>">
-  %5 = remis %arg2, %arg2 : vector<4xi64>
+  %5 = remi_signed %arg2, %arg2 : vector<4xi64>
 // CHECK-NEXT:  %5 = llvm.urem %arg2, %arg2 : !llvm<"<4 x i64>">
-  %6 = remiu %arg2, %arg2 : vector<4xi64>
+  %6 = remi_unsigned %arg2, %arg2 : vector<4xi64>
 // CHECK-NEXT:  %6 = llvm.fdiv %arg0, %0 : !llvm<"<4 x float>">
   %7 = divf %arg0, %0 : vector<4xf32>
 // CHECK-NEXT:  %7 = llvm.frem %arg0, %0 : !llvm<"<4 x float>">
@@ -388,6 +388,12 @@ func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>
   %10 = or %arg2, %arg3 : vector<4xi64>
 // CHECK-NEXT:  %10 = llvm.xor %arg2, %arg3 : !llvm<"<4 x i64>">
   %11 = xor %arg2, %arg3 : vector<4xi64>
+// CHECK-NEXT:  %11 = llvm.shl %arg2, %arg2 : !llvm<"<4 x i64>">
+  %12 = shift_left %arg2, %arg2 : vector<4xi64>
+// CHECK-NEXT:  %12 = llvm.ashr %arg2, %arg2 : !llvm<"<4 x i64>">
+  %13 = shift_right_signed %arg2, %arg2 : vector<4xi64>
+// CHECK-NEXT:  %13 = llvm.lshr %arg2, %arg2 : !llvm<"<4 x i64>">
+  %14 = shift_right_unsigned %arg2, %arg2 : vector<4xi64>
   return %1 : vector<4xf32>
 }
 
@@ -401,13 +407,13 @@ func @ops(f32, f32, i32, i32) -> (f32, i32) {
// CHECK-NEXT:  %2 = llvm.icmp "slt" %arg2, %1 : !llvm.i32
   %2 = cmpi "slt", %arg2, %1 : i32
// CHECK-NEXT:  %3 = llvm.sdiv %arg2, %arg3 : !llvm.i32
-  %4 = divis %arg2, %arg3 : i32
+  %4 = divi_signed %arg2, %arg3 : i32
// CHECK-NEXT:  %4 = llvm.udiv %arg2, %arg3 : !llvm.i32
-  %5 = diviu %arg2, %arg3 : i32
+  %5 = divi_unsigned %arg2, %arg3 : i32
// CHECK-NEXT:  %5 = llvm.srem %arg2, %arg3 : !llvm.i32
-  %6 = remis %arg2, %arg3 : i32
+  %6 = remi_signed %arg2, %arg3 : i32
// CHECK-NEXT:  %6 = llvm.urem %arg2, %arg3 : !llvm.i32
-  %7 = remiu %arg2, %arg3 : i32
+  %7 = remi_unsigned %arg2, %arg3 : i32
// CHECK-NEXT:  %7 = llvm.select %2, %arg2, %arg3 : !llvm.i1, !llvm.i32
   %8 = select %2, %arg2, %arg3 : i32
// CHECK-NEXT:  %8 = llvm.fdiv %arg0, %arg1 : !llvm.float
@@ -428,6 +434,12 @@ func @ops(f32, f32, i32, i32) -> (f32, i32) {
   %16 = constant 7.9e-01 : f64
// CHECK-NEXT:  %16 = llvm.call @tanh(%15) : (!llvm.double) -> !llvm.double
   %17 = std.tanh %16 : f64
+// CHECK-NEXT:  %17 = llvm.shl %arg2, %arg3 : !llvm.i32
+  %18 = shift_left %arg2, %arg3 : i32
+// CHECK-NEXT:  %18 = llvm.ashr %arg2, %arg3 : !llvm.i32
+  %19 = shift_right_signed %arg2, %arg3 : i32
+// CHECK-NEXT:  %19 = llvm.lshr %arg2, %arg3 : !llvm.i32
+  %20 = shift_right_unsigned %arg2, %arg3 : i32
   return %0, %4 : f32, i32
 }
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-to-spirv.mlir
index c1134935f33..8876c5178e0 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-to-spirv.mlir
@@ -88,9 +88,9 @@ func @fsub_scalar(%arg: f32) -> f32 {
 // CHECK-LABEL: @div_rem
 func @div_rem(%arg0 : i32, %arg1 : i32) {
   // CHECK: spv.SDiv
-  %0 = divis %arg0, %arg1 : i32
+  %0 = divi_signed %arg0, %arg1 : i32
   // CHECK: spv.SMod
-  %1 = remis %arg0, %arg1 : i32
+  %1 = remi_signed %arg0, %arg1 : i32
   return
 }
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index 81bb0b94efe..0b105eadf5a 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -101,8 +101,8 @@ TEST_FUNC(builder_dynamic_for_func_args) {
   // CHECK-DAG:  [[rf4:%[0-9]+]] = mulf {{.*}}, {{.*}} : f32
   // CHECK:      {{.*}} = subf [[rf3]], [[rf4]] : f32
   // CHECK-DAG:  [[ri1:%[0-9]+]] = addi {{.*}}, {{.*}} : i32
-  // CHECK-DAG:  [[ri2:%[0-9]+]] = divis [[ri1]], {{.*}} : i32
-  // CHECK-DAG:  [[ri3:%[0-9]+]] = remis [[ri2]], {{.*}} : i32
+  // CHECK-DAG:  [[ri2:%[0-9]+]] = divi_signed [[ri1]], {{.*}} : i32
+  // CHECK-DAG:  [[ri3:%[0-9]+]] = remi_signed [[ri2]], {{.*}} : i32
   // CHECK-DAG:  [[ri4:%[0-9]+]] = muli {{.*}}, {{.*}} : i32
   // CHECK:      {{.*}} = subi [[ri3]], [[ri4]] : i32
   // clang-format on
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index c3a9ee462b1..aac0dfc03a3 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -153,65 +153,65 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %cst_4, %cst_4 : tensor<42xi32>
   %25 = "std.select"(%19, %tci32, %tci32) : (tensor<42 x i1>, tensor<42 x i32>, tensor<42 x i32>) -> tensor<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = divis %arg2, %arg2 : i32
-  %26 = divis %i, %i : i32
+  // CHECK: %{{[0-9]+}} = divi_signed %arg2, %arg2 : i32
+  %26 = divi_signed %i, %i : i32
 
-  // CHECK: %{{[0-9]+}} = divis %arg3, %arg3 : index
-  %27 = divis %idx, %idx : index
+  // CHECK: %{{[0-9]+}} = divi_signed %arg3, %arg3 : index
+  %27 = divi_signed %idx, %idx : index
 
-  // CHECK: %{{[0-9]+}} = divis %cst_5, %cst_5 : vector<42xi32>
-  %28 = divis %vci32, %vci32 : vector<42 x i32>
+  // CHECK: %{{[0-9]+}} = divi_signed %cst_5, %cst_5 : vector<42xi32>
+  %28 = divi_signed %vci32, %vci32 : vector<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = divis %cst_4, %cst_4 : tensor<42xi32>
-  %29 = divis %tci32, %tci32 : tensor<42 x i32>
+  // CHECK: %{{[0-9]+}} = divi_signed %cst_4, %cst_4 : tensor<42xi32>
+  %29 = divi_signed %tci32, %tci32 : tensor<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = divis %arg2, %arg2 : i32
-  %30 = "std.divis"(%i, %i) : (i32, i32) -> i32
+  // CHECK: %{{[0-9]+}} = divi_signed %arg2, %arg2 : i32
+  %30 = "std.divi_signed"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK: %{{[0-9]+}} = diviu %arg2, %arg2 : i32
-  %31 = diviu %i, %i : i32
+  // CHECK: %{{[0-9]+}} = divi_unsigned %arg2, %arg2 : i32
+  %31 = divi_unsigned %i, %i : i32
 
-  // CHECK: %{{[0-9]+}} = diviu %arg3, %arg3 : index
-  %32 = diviu %idx, %idx : index
+  // CHECK: %{{[0-9]+}} = divi_unsigned %arg3, %arg3 : index
+  %32 = divi_unsigned %idx, %idx : index
 
-  // CHECK: %{{[0-9]+}} = diviu %cst_5, %cst_5 : vector<42xi32>
-  %33 = diviu %vci32, %vci32 : vector<42 x i32>
+  // CHECK: %{{[0-9]+}} = divi_unsigned %cst_5, %cst_5 : vector<42xi32>
+  %33 = divi_unsigned %vci32, %vci32 : vector<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = diviu %cst_4, %cst_4 : tensor<42xi32>
-  %34 = diviu %tci32, %tci32 : tensor<42 x i32>
+  // CHECK: %{{[0-9]+}} = divi_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  %34 = divi_unsigned %tci32, %tci32 : tensor<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = diviu %arg2, %arg2 : i32
-  %35 = "std.diviu"(%i, %i) : (i32, i32) -> i32
+  // CHECK: %{{[0-9]+}} = divi_unsigned %arg2, %arg2 : i32
+  %35 = "std.divi_unsigned"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK: %{{[0-9]+}} = remis %arg2, %arg2 : i32
-  %36 = remis %i, %i : i32
+  // CHECK: %{{[0-9]+}} = remi_signed %arg2, %arg2 : i32
+  %36 = remi_signed %i, %i : i32
 
-  // CHECK: %{{[0-9]+}} = remis %arg3, %arg3 : index
-  %37 = remis %idx, %idx : index
+  // CHECK: %{{[0-9]+}} = remi_signed %arg3, %arg3 : index
+  %37 = remi_signed %idx, %idx : index
 
-  // CHECK: %{{[0-9]+}} = remis %cst_5, %cst_5 : vector<42xi32>
-  %38 = remis %vci32, %vci32 : vector<42 x i32>
+  // CHECK: %{{[0-9]+}} = remi_signed %cst_5, %cst_5 : vector<42xi32>
+  %38 = remi_signed %vci32, %vci32 : vector<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = remis %cst_4, %cst_4 : tensor<42xi32>
-  %39 = remis %tci32, %tci32 : tensor<42 x i32>
+  // CHECK: %{{[0-9]+}} = remi_signed %cst_4, %cst_4 : tensor<42xi32>
+  %39 = remi_signed %tci32, %tci32 : tensor<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = remis %arg2, %arg2 : i32
-  %40 = "std.remis"(%i, %i) : (i32, i32) -> i32
+  // CHECK: %{{[0-9]+}} = remi_signed %arg2, %arg2 : i32
+  %40 = "std.remi_signed"(%i, %i) : (i32, i32) -> i32
 
-  // CHECK: %{{[0-9]+}} = remiu %arg2, %arg2 : i32
-  %41 = remiu %i, %i : i32
+  // CHECK: %{{[0-9]+}} = remi_unsigned %arg2, %arg2 : i32
+  %41 = remi_unsigned %i, %i : i32
 
-  // CHECK: %{{[0-9]+}} = remiu %arg3, %arg3 : index
-  %42 = remiu %idx, %idx : index
+  // CHECK: %{{[0-9]+}} = remi_unsigned %arg3, %arg3 : index
+  %42 = remi_unsigned %idx, %idx : index
 
-  // CHECK: %{{[0-9]+}} = remiu %cst_5, %cst_5 : vector<42xi32>
-  %43 = remiu %vci32, %vci32 : vector<42 x i32>
+  // CHECK: %{{[0-9]+}} = remi_unsigned %cst_5, %cst_5 : vector<42xi32>
+  %43 = remi_unsigned %vci32, %vci32 : vector<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = remiu %cst_4, %cst_4 : tensor<42xi32>
-  %44 = remiu %tci32, %tci32 : tensor<42 x i32>
+  // CHECK: %{{[0-9]+}} = remi_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  %44 = remi_unsigned %tci32, %tci32 : tensor<42 x i32>
 
-  // CHECK: %{{[0-9]+}} = remiu %arg2, %arg2 : i32
-  %45 = "std.remiu"(%i, %i) : (i32, i32) -> i32
+  // CHECK: %{{[0-9]+}} = remi_unsigned %arg2, %arg2 : i32
+  %45 = "std.remi_unsigned"(%i, %i) : (i32, i32) -> i32
 
   // CHECK: %{{[0-9]+}} = divf %arg1, %arg1 : f32
   %46 = "std.divf"(%f, %f) : (f32,f32) -> f32
@@ -448,6 +448,52 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
   // CHECK: %{{[0-9]+}} = tanh %arg0 : tensor<4x4x?xf32>
   %123 = tanh %t : tensor<4x4x?xf32>
+
+  // CHECK: %{{[0-9]+}} = shift_left %arg2, %arg2 : i32
+  %124 = "std.shift_left"(%i, %i) : (i32, i32) -> i32
+
+  // CHECK: %{{[0-9]+}} = shift_left %4, %4 : i32
+  %125 = shift_left %i2, %i2 : i32
+
+  // CHECK: %{{[0-9]+}} = shift_left %arg3, %arg3 : index
+  %126 = shift_left %idx, %idx : index
+
+  // CHECK: %{{[0-9]+}} = shift_left %cst_5, %cst_5 : vector<42xi32>
+  %127 = shift_left %vci32, %vci32 : vector<42 x i32>
+
+  // CHECK: %{{[0-9]+}} = shift_left %cst_4, %cst_4 : tensor<42xi32>
+  %128 = shift_left %tci32, %tci32 : tensor<42 x i32>
+
+  // CHECK: %{{[0-9]+}} = shift_right_signed %arg2, %arg2 : i32
+  %129 = "std.shift_right_signed"(%i, %i) : (i32, i32) -> i32
+
+  // CHECK: %{{[0-9]+}} = shift_right_signed %4, %4 : i32
+  %130 = shift_right_signed %i2, %i2 : i32
+
+  // CHECK: %{{[0-9]+}} = shift_right_signed %arg3, %arg3 : index
+  %131 = shift_right_signed %idx, %idx : index
+
+  // CHECK: %{{[0-9]+}} = shift_right_signed %cst_5, %cst_5 : vector<42xi32>
+  %132 = shift_right_signed %vci32, %vci32 : vector<42 x i32>
+
+  // CHECK: %{{[0-9]+}} = shift_right_signed %cst_4, %cst_4 : tensor<42xi32>
+  %133 = shift_right_signed %tci32, %tci32 : tensor<42 x i32>
+
+  // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg2, %arg2 : i32
+  %134 = "std.shift_right_unsigned"(%i, %i) : (i32, i32) -> i32
+
+  // CHECK: %{{[0-9]+}} = shift_right_unsigned %4, %4 : i32
+  %135 = shift_right_unsigned %i2, %i2 : i32
+
+  // CHECK: %{{[0-9]+}} = shift_right_unsigned %arg3, %arg3 : index
+  %136 = shift_right_unsigned %idx, %idx : index
+
+  // CHECK: %{{[0-9]+}} = shift_right_unsigned %cst_5, %cst_5 : vector<42xi32>
+  %137 = shift_right_unsigned %vci32, %vci32 : vector<42 x i32>
+
+  // CHECK: %{{[0-9]+}} = shift_right_unsigned %cst_4, %cst_4 : tensor<42xi32>
+  %138 = shift_right_unsigned %tci32, %tci32 : tensor<42 x i32>
+
   return
 }
diff --git a/mlir/test/Target/llvmir.mlir b/mlir/test/Target/llvmir.mlir
index 0ad007310c9..9d0ee383046 100644
--- a/mlir/test/Target/llvmir.mlir
+++ b/mlir/test/Target/llvmir.mlir
@@ -794,6 +794,12 @@ llvm.func @vector_ops(%arg0: !llvm<"<4 x float>">, %arg1: !llvm<"<4 x i1>">, %ar
   %10 = llvm.or %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: %14 = xor <4 x i64> %2, %2
   %11 = llvm.xor %arg2, %arg2 : !llvm<"<4 x i64>">
+// CHECK-NEXT: %15 = shl <4 x i64> %2, %2
+  %12 = llvm.shl %arg2, %arg2 : !llvm<"<4 x i64>">
+// CHECK-NEXT: %16 = lshr <4 x i64> %2, %2
+  %13 = llvm.lshr %arg2, %arg2 : !llvm<"<4 x i64>">
+// CHECK-NEXT: %17 = ashr <4 x i64> %2, %2
+  %14 = llvm.ashr %arg2, %arg2 : !llvm<"<4 x i64>">
// CHECK-NEXT: ret <4 x float> %4
   llvm.return %1 : !llvm<"<4 x float>">
 }
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 07a7e7cad05..09db0889655 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -576,7 +576,7 @@ func @lowered_affine_mod() -> (index, index) {
 // CHECK-NEXT: {{.*}} = constant 41 : index
   %c-43 = constant -43 : index
   %c42 = constant 42 : index
-  %0 = remis %c-43, %c42 : index
+  %0 = remi_signed %c-43, %c42 : index
   %c0 = constant 0 : index
   %1 = cmpi "slt", %0, %c0 : index
   %2 = addi %0, %c42 : index
@@ -584,7 +584,7 @@ func @lowered_affine_mod() -> (index, index) {
 // CHECK-NEXT: {{.*}} = constant 1 : index
   %c43 = constant 43 : index
   %c42_0 = constant 42 : index
-  %4 = remis %c43, %c42_0 : index
+  %4 = remi_signed %c43, %c42_0 : index
   %c0_1 = constant 0 : index
   %5 = cmpi "slt", %4, %c0_1 : index
   %6 = addi %4, %c42_0 : index
@@ -607,7 +607,7 @@ func @lowered_affine_floordiv() -> (index, index) {
   %0 = cmpi "slt", %c-43, %c0 : index
   %1 = subi %c-1, %c-43 : index
   %2 = select %0, %1, %c-43 : index
-  %3 = divis %2, %c42 : index
+  %3 = divi_signed %2, %c42 : index
   %4 = subi %c-1, %3 : index
   %5 = select %0, %4, %3 : index
 // CHECK-NEXT: %c1 = constant 1 : index
@@ -618,7 +618,7 @@ func @lowered_affine_floordiv() -> (index, index) {
   %6 = cmpi "slt", %c43, %c0_1 : index
   %7 = subi %c-1_2, %c43 : index
   %8 = select %6, %7, %c43 : index
-  %9 = divis %8, %c42_0 : index
+  %9 = divi_signed %8, %c42_0 : index
   %10 = subi %c-1_2, %9 : index
   %11 = select %6, %10, %9 : index
   return %5, %11 : index, index
@@ -640,7 +640,7 @@ func @lowered_affine_ceildiv() -> (index, index) {
   %1 = subi %c0, %c-43 : index
   %2 = subi %c-43, %c1 : index
   %3 = select %0, %1, %2 : index
-  %4 = divis %3, %c42 : index
+  %4 = divi_signed %3, %c42 : index
   %5 = subi %c0, %4 : index
   %6 = addi %4, %c1 : index
   %7 = select %0, %5, %6 : index
@@ -653,7 +653,7 @@ func @lowered_affine_ceildiv() -> (index, index) {
   %9 = subi %c0_1, %c43 : index
   %10 = subi %c43, %c1_2 : index
   %11 = select %8, %9, %10 : index
-  %12 = divis %11, %c42_0 : index
+  %12 = divi_signed %11, %c42_0 : index
   %13 = subi %c0_1, %12 : index
   %14 = addi %12, %c1_2 : index
   %15 = select %8, %13, %14 : index
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index b45fec665ef..a24aad2847a 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -212,8 +212,8 @@ func @mulf_splat_tensor() -> tensor<4xf32> {
 
 // -----
 
-// CHECK-LABEL: func @simple_divis
-func @simple_divis() -> (i32, i32, i32) {
+// CHECK-LABEL: func @simple_divi_signed
+func @simple_divi_signed() -> (i32, i32, i32) {
   // CHECK-DAG: [[C0:%.+]] = constant 0
   %z = constant 0 : i32
   // CHECK-DAG: [[C6:%.+]] = constant 6
   %0 = constant 6 : i32
   %1 = constant 2 : i32
 
   // CHECK-NEXT: [[C3:%.+]] = constant 3 : i32
-  %2 = divis %0, %1 : i32
+  %2 = divi_signed %0, %1 : i32
 
   %3 = constant -2 : i32
 
   // CHECK-NEXT: [[CM3:%.+]] = constant -3 : i32
-  %4 = divis %0, %3 : i32
+  %4 = divi_signed %0, %3 : i32
 
-  // CHECK-NEXT: [[XZ:%.+]] = divis [[C6]], [[C0]]
-  %5 = divis %0, %z : i32
+  // CHECK-NEXT: [[XZ:%.+]] = divi_signed [[C6]], [[C0]]
+  %5 = divi_signed %0, %z : i32
 
   // CHECK-NEXT: return [[C3]], [[CM3]], [[XZ]]
   return %2, %4, %5 : i32, i32, i32
@@ -237,8 +237,8 @@ func @simple_divis() -> (i32, i32, i32) {
 
 // -----
 
-// CHECK-LABEL: func @divis_splat_tensor
-func @divis_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
+// CHECK-LABEL: func @divi_signed_splat_tensor
+func @divi_signed_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
   // CHECK-DAG: [[C0:%.+]] = constant dense<0>
   %z = constant dense<0> : tensor<4xi32>
   // CHECK-DAG: [[C6:%.+]] = constant dense<6>
   %0 = constant dense<6> : tensor<4xi32>
   %1 = constant dense<2> : tensor<4xi32>
 
   // CHECK-NEXT: [[C3:%.+]] = constant dense<3> : tensor<4xi32>
-  %2 = divis %0, %1 : tensor<4xi32>
+  %2 = divi_signed %0, %1 : tensor<4xi32>
 
   %3 = constant dense<-2> : tensor<4xi32>
 
   // CHECK-NEXT: [[CM3:%.+]] = constant dense<-3> : tensor<4xi32>
-  %4 = divis %0, %3 : tensor<4xi32>
+  %4 = divi_signed %0, %3 : tensor<4xi32>
 
-  // CHECK-NEXT: [[XZ:%.+]] = divis [[C6]], [[C0]]
-  %5 = divis %0, %z : tensor<4xi32>
+  // CHECK-NEXT: [[XZ:%.+]] = divi_signed [[C6]], [[C0]]
+  %5 = divi_signed %0, %z : tensor<4xi32>
 
   // CHECK-NEXT: return [[C3]], [[CM3]], [[XZ]]
   return %2, %4, %5 : tensor<4xi32>, tensor<4xi32>, tensor<4xi32>
@@ -262,24 +262,24 @@ func @divis_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
 
 // -----
 
-// CHECK-LABEL: func @simple_diviu
-func @simple_diviu() -> (i32, i32, i32) {
+// CHECK-LABEL: func @simple_divi_unsigned
+func @simple_divi_unsigned() -> (i32, i32, i32) {
   %z = constant 0 : i32
   // CHECK-DAG: [[C6:%.+]] = constant 6
   %0 = constant 6 : i32
   %1 = constant 2 : i32
 
   // CHECK-DAG: [[C3:%.+]] = constant 3 : i32
-  %2 = diviu %0, %1 : i32
+  %2 = divi_unsigned %0, %1 : i32
 
   %3 = constant -2 : i32
 
   // Unsigned division interprets -2 as 2^32-2, so the result is 0.
   // CHECK-DAG: [[C0:%.+]] = constant 0 : i32
-  %4 = diviu %0, %3 : i32
+  %4 = divi_unsigned %0, %3 : i32
 
-  // CHECK-NEXT: [[XZ:%.+]] = diviu [[C6]], [[C0]]
-  %5 = diviu %0, %z : i32
+  // CHECK-NEXT: [[XZ:%.+]] = divi_unsigned [[C6]], [[C0]]
+  %5 = divi_unsigned %0, %z : i32
 
   // CHECK-NEXT: return [[C3]], [[C0]], [[XZ]]
   return %2, %4, %5 : i32, i32, i32
@@ -288,24 +288,24 @@ func @simple_diviu() -> (i32, i32, i32) {
 
 // -----
 
-// CHECK-LABEL: func @diviu_splat_tensor
-func @diviu_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
+// CHECK-LABEL: func @divi_unsigned_splat_tensor
+func @divi_unsigned_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
   %z = constant dense<0> : tensor<4xi32>
   // CHECK-DAG: [[C6:%.+]] = constant dense<6>
   %0 = constant dense<6> : tensor<4xi32>
   %1 = constant dense<2> : tensor<4xi32>
 
   // CHECK-DAG: [[C3:%.+]] = constant dense<3> : tensor<4xi32>
-  %2 = diviu %0, %1 : tensor<4xi32>
+  %2 = divi_unsigned %0, %1 : tensor<4xi32>
 
   %3 = constant dense<-2> : tensor<4xi32>
 
   // Unsigned division interprets -2 as 2^32-2, so the result is 0.
   // CHECK-DAG: [[C0:%.+]] = constant dense<0> : tensor<4xi32>
-  %4 = diviu %0, %3 : tensor<4xi32>
+  %4 = divi_unsigned %0, %3 : tensor<4xi32>
 
-  // CHECK-NEXT: [[XZ:%.+]] = diviu [[C6]], [[C0]]
-  %5 = diviu %0, %z : tensor<4xi32>
+  // CHECK-NEXT: [[XZ:%.+]] = divi_unsigned [[C6]], [[C0]]
+  %5 = divi_unsigned %0, %z : tensor<4xi32>
 
   // CHECK-NEXT: return [[C3]], [[C0]], [[XZ]]
   return %2, %4, %5 : tensor<4xi32>, tensor<4xi32>, tensor<4xi32>
@@ -313,18 +313,18 @@ func @diviu_splat_tensor() -> (tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) {
 
 // -----
 
-// CHECK-LABEL: func @simple_remis
-func @simple_remis(%a : i32) -> (i32, i32, i32) {
+// CHECK-LABEL: func @simple_remi_signed
+func @simple_remi_signed(%a : i32) -> (i32, i32, i32) {
   %0 = constant 5 : i32
   %1 = constant 2 : i32
   %2 = constant 1 : i32
   %3 = constant -2 : i32
 
   // CHECK-NEXT: [[C1:%.+]] = constant 1 : i32
-  %4 = remis %0, %1 : i32
-  %5 = remis %0, %3 : i32
+  %4 = remi_signed %0, %1 : i32
+  %5 = remi_signed %0, %3 : i32
   // CHECK-NEXT: [[C0:%.+]] = constant 0 : i32
-  %6 = remis %a, %2 : i32
+  %6 = remi_signed %a, %2 : i32
 
   // CHECK-NEXT: return [[C1]], [[C1]], [[C0]] : i32, i32, i32
   return %4, %5, %6 : i32, i32, i32
@@ -332,19 +332,19 @@ func @simple_remis(%a : i32) -> (i32, i32, i32) {
 
 // -----
 
-// CHECK-LABEL: func @simple_remiu
-func @simple_remiu(%a : i32) -> (i32, i32, i32) {
+// CHECK-LABEL: func @simple_remi_unsigned
+func @simple_remi_unsigned(%a : i32) -> (i32, i32, i32) {
   %0 = constant 5 : i32
   %1 = constant 2 : i32
   %2 = constant 1 : i32
   %3 = constant -2 : i32
 
   // CHECK-DAG: [[C1:%.+]] = constant 1 : i32
-  %4 = remiu %0, %1 : i32
+  %4 = remi_unsigned %0, %1 : i32
   // CHECK-DAG: [[C5:%.+]] = constant 5 : i32
-  %5 = remiu %0, %3 : i32
+  %5 = remi_unsigned %0, %3 : i32
   // CHECK-DAG: [[C0:%.+]] = constant 0 : i32
-  %6 = remiu %a, %2 : i32
+  %6 = remi_unsigned %a, %2 : i32
 
   // CHECK-NEXT: return [[C1]], [[C5]], [[C0]] : i32, i32, i32
   return %4, %5, %6 : i32, i32, i32
diff --git a/mlir/test/Transforms/loop-coalescing.mlir b/mlir/test/Transforms/loop-coalescing.mlir
index 45e2b5d07fc..d10cf19543e 100644
--- a/mlir/test/Transforms/loop-coalescing.mlir
+++ b/mlir/test/Transforms/loop-coalescing.mlir
@@ -26,10 +26,10 @@ func @one_3d_nest() {
   // CHECK-NOT: loop.for
 
   // Reconstruct original IVs from the linearized one.
-  // CHECK: %[[orig_k:.*]] = remis %[[i]], %[[orig_ub_k]]
-  // CHECK: %[[div:.*]] = divis %[[i]], %[[orig_ub_k]]
-  // CHECK: %[[orig_j:.*]] = remis %[[div]], %[[orig_ub_j]]
-  // CHECK: %[[orig_i:.*]] = divis %[[div]], %[[orig_ub_j]]
+  // CHECK: %[[orig_k:.*]] = remi_signed %[[i]], %[[orig_ub_k]]
+  // CHECK: %[[div:.*]] = divi_signed %[[i]], %[[orig_ub_k]]
+  // CHECK: %[[orig_j:.*]] = remi_signed %[[div]], %[[orig_ub_j]]
+  // CHECK: %[[orig_i:.*]] = divi_signed %[[div]], %[[orig_ub_j]]
   loop.for %j = %c0 to %c56 step %c1 {
     loop.for %k = %c0 to %c3 step %c1 {
       // CHECK: "use"(%[[orig_i]], %[[orig_j]], %[[orig_k]])
@@ -52,10 +52,10 @@ func @multi_use() {
   loop.for %i = %c1 to %c10 step %c1 {
     loop.for %j = %c1 to %c10 step %c1 {
      loop.for %k = %c1 to %c10 step %c1 {
-        // CHECK: %[[k_unshifted:.*]] = remis %[[iv]], %[[k_extent:.*]]
-        // CHECK: %[[ij:.*]] = divis %[[iv]], %[[k_extent]]
-        // CHECK: %[[j_unshifted:.*]] = remis %[[ij]], %[[j_extent:.*]]
-        // CHECK: %[[i_unshifted:.*]] = divis %[[ij]], %[[j_extent]]
+        // CHECK: %[[k_unshifted:.*]] = remi_signed %[[iv]], %[[k_extent:.*]]
+        // CHECK: %[[ij:.*]] = divi_signed %[[iv]], %[[k_extent]]
+        // CHECK: %[[j_unshifted:.*]] = remi_signed %[[ij]], %[[j_extent:.*]]
+        // CHECK: %[[i_unshifted:.*]] = divi_signed %[[ij]], %[[j_extent]]
        // CHECK: %[[k:.*]] = addi %[[k_unshifted]]
        // CHECK: %[[j:.*]] = addi %[[j_unshifted]]
        // CHECK: %[[i:.*]] = addi %[[i_unshifted]]
@@ -91,7 +91,7 @@ func @unnormalized_loops() {
   // CHECK: %[[c1:.*]] = constant 1
   // CHECK: %[[step_minus_c1:.*]] = subi %[[orig_step_i]], %[[c1]]
   // CHECK: %[[dividend:.*]] = addi %[[diff_i]], %[[step_minus_c1]]
-  // CHECK: %[[numiter_i:.*]] = divis %[[dividend]], %[[orig_step_i]]
+  // CHECK: %[[numiter_i:.*]] = divi_signed %[[dividend]], %[[orig_step_i]]
 
   // Normalized lower bound and step for the outer loop.
   // CHECK: %[[lb_i:.*]] = constant 0
 
   // Number of iterations in the inner loop, the pattern is the same as above,
   // only capture the final result.
-  // CHECK: %[[numiter_j:.*]] = divis {{.*}}, %[[orig_step_j]]
+  // CHECK: %[[numiter_j:.*]] = divi_signed {{.*}}, %[[orig_step_j]]
 
   // New bounds of the outer loop.
   // CHECK: %[[range:.*]] = muli %[[numiter_i]], %[[numiter_j]]
 
   // CHECK-NOT: loop.for
   loop.for %j = %c7 to %c17 step %c3 {
     // The IVs are rewritten.
-    // CHECK: %[[normalized_j:.*]] = remis %[[i]], %[[numiter_j]]
-    // CHECK: %[[normalized_i:.*]] = divis %[[i]], %[[numiter_j]]
+    // CHECK: %[[normalized_j:.*]] = remi_signed %[[i]], %[[numiter_j]]
+    // CHECK: %[[normalized_i:.*]] = divi_signed %[[i]], %[[numiter_j]]
     // CHECK: %[[scaled_j:.*]] = muli %[[normalized_j]], %[[orig_step_j]]
     // CHECK: %[[orig_j:.*]] = addi %[[scaled_j]], %[[orig_lb_j]]
     // CHECK: %[[scaled_i:.*]] = muli %[[normalized_i]], %[[orig_step_i]]
@@ -137,11 +137,11 @@ func @parametric(%lb1 : index, %ub1 : index, %step1 : index,
   // CHECK: %[[range1:.*]] = subi %[[orig_ub1]], %[[orig_lb1]]
   // CHECK: %[[orig_step1_minus_1:.*]] = subi %[[orig_step1]], %c1
   // CHECK: %[[dividend1:.*]] = addi %[[range1]], %[[orig_step1_minus_1]]
-  // CHECK: %[[numiter1:.*]] = divis %[[dividend1]], %[[orig_step1]]
+  // CHECK: %[[numiter1:.*]] = divi_signed %[[dividend1]], %[[orig_step1]]
   // CHECK: %[[range2:.*]] = subi %[[orig_ub2]], %[[orig_lb2]]
   // CHECK: %[[orig_step2_minus_1:.*]] = subi %arg5, %c1
   // CHECK: %[[dividend2:.*]] = addi %[[range2]], %[[orig_step2_minus_1]]
-  // CHECK: %[[numiter2:.*]] = divis %[[dividend2]], %[[orig_step2]]
+  // CHECK: %[[numiter2:.*]] = divi_signed %[[dividend2]], %[[orig_step2]]
   // CHECK: %[[range:.*]] = muli %[[numiter1]], %[[numiter2]] : index
 
   // Check that the outer loop is updated.
 
   // CHECK-NOT: loop.for
   loop.for %j = %lb2 to %ub2 step %step2 {
     // Remapping of the induction variables.
-    // CHECK: %[[normalized_j:.*]] = remis %[[i]], %[[numiter2]] : index
-    // CHECK: %[[normalized_i:.*]] = divis %[[i]], %[[numiter2]] : index
+    // CHECK: %[[normalized_j:.*]] = remi_signed %[[i]], %[[numiter2]] : index
+    // CHECK: %[[normalized_i:.*]] = divi_signed %[[i]], %[[numiter2]] : index
     // CHECK: %[[scaled_j:.*]] = muli %[[normalized_j]], %[[orig_step2]]
     // CHECK: %[[orig_j:.*]] = addi %[[scaled_j]], %[[orig_lb2]]
     // CHECK: %[[scaled_i:.*]] = muli %[[normalized_i]], %[[orig_step1]]
diff --git a/mlir/test/Transforms/lower-affine.mlir b/mlir/test/Transforms/lower-affine.mlir
index dac35578808..1c3de885adf 100644
--- a/mlir/test/Transforms/lower-affine.mlir
+++ b/mlir/test/Transforms/lower-affine.mlir
@@ -452,7 +452,7 @@ func @args_ret_affine_apply(index, index) -> (index, index) {
 // CHECK-LABEL: func @affine_apply_mod
 func @affine_apply_mod(%arg0 : index) -> (index) {
 // CHECK-NEXT: %[[c42:.*]] = constant 42 : index
-// CHECK-NEXT: %[[v0:.*]] = remis %{{.*}}, %[[c42]] : index
+// CHECK-NEXT: %[[v0:.*]] = remi_signed %{{.*}}, %[[c42]] : index
 // CHECK-NEXT: %[[c0:.*]] = constant 0 : index
 // CHECK-NEXT: %[[v1:.*]] = cmpi "slt", %[[v0]], %[[c0]] : index
 // CHECK-NEXT: %[[v2:.*]] = addi %[[v0]], %[[c42]] : index
@@ -476,7 +476,7 @@ func @affine_apply_floordiv(%arg0 : index) -> (index) {
 // CHECK-NEXT: %[[v0:.*]] = cmpi "slt", %{{.*}}, %[[c0]] : index
 // CHECK-NEXT: %[[v1:.*]] = subi %[[cm1]], %{{.*}} : index
 // CHECK-NEXT: %[[v2:.*]] = select %[[v0]], %[[v1]], %{{.*}} : index
-// CHECK-NEXT: %[[v3:.*]] = divis %[[v2]], %[[c42]] : index
+// CHECK-NEXT: %[[v3:.*]] = divi_signed %[[v2]], %[[c42]] : index
 // CHECK-NEXT: %[[v4:.*]] = subi %[[cm1]], %[[v3]] : index
 // CHECK-NEXT: %[[v5:.*]] = select %[[v0]], %[[v4]], %[[v3]] : index
   %0 = affine.apply #mapfloordiv (%arg0)
@@ -499,7 +499,7 @@ func @affine_apply_ceildiv(%arg0 : index) -> (index) {
 // CHECK-NEXT: %[[v1:.*]] = subi %[[c0]], %{{.*}} : index
 // CHECK-NEXT: %[[v2:.*]] = subi %{{.*}}, %[[c1]] : index
 // CHECK-NEXT: %[[v3:.*]] = select %[[v0]], %[[v1]], %[[v2]] : index
-// CHECK-NEXT: %[[v4:.*]] = divis %[[v3]], %[[c42]] : index
+// CHECK-NEXT: %[[v4:.*]] = divi_signed %[[v3]], %[[c42]] : index
 // CHECK-NEXT: %[[v5:.*]] = subi %[[c0]], %[[v4]] : index
 // CHECK-NEXT: %[[v6:.*]] = addi %[[v4]], %[[c1]] : index
 // CHECK-NEXT: %[[v7:.*]] = select %[[v0]], %[[v5]], %[[v6]] : index
diff --git a/mlir/test/Transforms/parametric-tiling.mlir b/mlir/test/Transforms/parametric-tiling.mlir
index 2e715fd3176..afa33cb07c1 100644
--- a/mlir/test/Transforms/parametric-tiling.mlir
+++ b/mlir/test/Transforms/parametric-tiling.mlir
@@ -12,11 +12,11 @@ func @rectangular(%arg0: memref<?x?xf32>) {
   // COMMON:      %[[diff:.*]] = subi %c44, %c2
   // COMMON:      %[[adjustment:.*]] = subi %c1, %c1_{{.*}}
   // COMMON-NEXT: %[[diff_adj:.*]] = addi %[[diff]], %[[adjustment]]
-  // COMMON-NEXT: %[[range:.*]] = divis %[[diff_adj]], %c1
+  // COMMON-NEXT: %[[range:.*]] = divi_signed %[[diff_adj]], %c1
 
   // Ceildiv to get the parametric tile size.
   // COMMON:      %[[sum:.*]] = addi %[[range]], %c6
-  // COMMON-NEXT: %[[size:.*]] = divis %[[sum]], %c7
+  // COMMON-NEXT: %[[size:.*]] = divi_signed %[[sum]], %c7
 
   // New outer step (original is %c1).
   // COMMON-NEXT: %[[step:.*]] = muli %c1, %[[size]]
@@ -26,11 +26,11 @@ func @rectangular(%arg0: memref<?x?xf32>) {
   // TILE_74:      %[[diff2:.*]] = subi %c44, %c1
   // TILE_74:      %[[adjustment2:.*]] = subi %c2, %c1_{{.*}}
   // TILE_74-NEXT: %[[diff2_adj:.*]] = addi %[[diff2]], %[[adjustment2]]
-  // TILE_74-NEXT: %[[range2:.*]] = divis %[[diff2_adj]], %c2
+  // TILE_74-NEXT: %[[range2:.*]] = divi_signed %[[diff2_adj]], %c2
 
   // Ceildiv to get the parametric tile size for the second original loop.
   // TILE_74:      %[[sum2:.*]] = addi %[[range2]], %c3
-  // TILE_74-NEXT: %[[size2:.*]] = divis %[[sum2]], %c4
+  // TILE_74-NEXT: %[[size2:.*]] = divi_signed %[[sum2]], %c4
 
   // New inner step (original is %c2).
   // TILE_74-NEXT: %[[step2:.*]] = muli %c2, %[[size2]]
@@ -76,11 +76,11 @@ func @triangular(%arg0: memref<?x?xf32>) {
   // COMMON:      %[[diff:.*]] = subi %c44, %c2
   // COMMON:      %[[adjustment:.*]] = subi %c1, %c1_{{.*}}
   // COMMON-NEXT: %[[diff_adj:.*]] = addi %[[diff]], %[[adjustment]]
-  // COMMON-NEXT: %[[range:.*]] = divis %[[diff_adj]], %c1
+  // COMMON-NEXT: %[[range:.*]] = divi_signed %[[diff_adj]], %c1
 
   // Ceildiv to get the parametric tile size.
   // COMMON:      %[[sum:.*]] = addi %[[range]], %c6
-  // COMMON-NEXT: %[[size:.*]] = divis %[[sum]], %c7
+  // COMMON-NEXT: %[[size:.*]] = divi_signed %[[sum]], %c7
 
   // New outer step (original is %c1).
   // COMMON-NEXT: %[[step:.*]] = muli %c1, %[[size]]
@@ -95,11 +95,11 @@ func @triangular(%arg0: memref<?x?xf32>) {
   // where step is known to be %c2.
   // TILE_74:      %[[diff2:.*]] = subi %[[i]], %c1
   // TILE_74-NEXT: %[[diff2_adj:.*]] = addi %[[diff2]], %[[adjustment2]]
-  // TILE_74-NEXT: %[[range2:.*]] = divis %[[diff2_adj]], %c2
+  // TILE_74-NEXT: %[[range2:.*]] = divi_signed %[[diff2_adj]], %c2
 
   // Ceildiv to get the parametric tile size for the second original loop.
   // TILE_74:      %[[sum2:.*]] = addi %[[range2]], %c3
-  // TILE_74-NEXT: %[[size2:.*]] = divis %[[sum2]], %c4
+  // TILE_74-NEXT: %[[size2:.*]] = divi_signed %[[sum2]], %c4
 
   // New inner step (original is %c2).
   // TILE_74-NEXT: %[[step2:.*]] = muli %c2, %[[size2]]
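For reference, a minimal sketch of how the renamed and newly added ops read after this patch. The function name and operand choices below are illustrative only, not taken from the commit; the op spellings and lowering targets are those shown in the tests above.

  func @renamed_and_new_ops(%a : i32, %b : i32) {
    // Renamed integer division/remainder ops (formerly divis, diviu,
    // remis, remiu).
    %0 = divi_signed %a, %b : i32
    %1 = divi_unsigned %a, %b : i32
    %2 = remi_signed %a, %b : i32
    %3 = remi_unsigned %a, %b : i32
    // New shift ops replacing shlis; per ConvertStandardToLLVM.cpp they
    // lower to llvm.shl, llvm.ashr, and llvm.lshr respectively.
    %4 = shift_left %a, %b : i32
    %5 = shift_right_signed %a, %b : i32
    %6 = shift_right_unsigned %a, %b : i32
    return
  }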