diff options
| author | River Riddle <riverriddle@google.com> | 2019-12-22 21:59:55 -0800 |
|---|---|---|
| committer | A. Unique TensorFlower <gardener@tensorflow.org> | 2019-12-22 22:00:23 -0800 |
| commit | 35807bc4c5c9d8abc31ba0b2f955a82abf276e12 (patch) | |
| tree | d083d37d993a774239081509a50e3e6c65366421 /mlir/lib/Quantizer/Transforms | |
| parent | 22954a0e408afde1d8686dffb3a3dcab107a2cd3 (diff) | |
| download | bcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.tar.gz bcm5719-llvm-35807bc4c5c9d8abc31ba0b2f955a82abf276e12.zip | |
NFC: Introduce new ValuePtr/ValueRef typedefs to simplify the transition to Value being value-typed.
This is an initial step to refactoring the representation of OpResult as proposed in: https://groups.google.com/a/tensorflow.org/g/mlir/c/XXzzKhqqF_0/m/v6bKb08WCgAJ
This change will make it much simpler to incrementally transition all of the existing code to use value-typed semantics.
PiperOrigin-RevId: 286844725
Diffstat (limited to 'mlir/lib/Quantizer/Transforms')
| -rw-r--r-- | mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp | 2 | ||||
| -rw-r--r-- | mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp | 14 |
2 files changed, 8 insertions, 8 deletions
diff --git a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
index a32bb2c9b3c..a3cbe214040 100644
--- a/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/AddDefaultStatsTestPass.cpp
@@ -74,7 +74,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
   auto func = getFunction();
 
   // Insert stats for each argument.
-  for (auto *arg : func.getArguments()) {
+  for (auto arg : func.getArguments()) {
     if (!config.isHandledType(arg->getType()))
       continue;
     OpBuilder b(func.getBody());
diff --git a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
index 511df0a463f..68c263bc423 100644
--- a/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
+++ b/mlir/lib/Quantizer/Transforms/InferQuantizedTypesPass.cpp
@@ -181,17 +181,17 @@ void InferQuantizedTypesPass::runWithConfig(SolverContext &solverContext,
 
 void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
                                                    Type newType) {
-  Value *inputValue = anchor->getValue();
+  ValuePtr inputValue = anchor->getValue();
   Operation *op = anchor->getOp();
   OpBuilder b(op->getBlock(), Block::iterator(op));
 
-  SmallVector<Value *, 1> removeValuesIfDead;
+  SmallVector<ValuePtr, 1> removeValuesIfDead;
 
   // Because we've already run the result transforms at this phase, it is
   // very likely that inputValue points to a dcast op whose input matches
   // our type. We detect that situation and route around just to save some
   // bulk in the IR.
-  Value *newTypedInputValue = inputValue;
+  ValuePtr newTypedInputValue = inputValue;
   auto inputDcastOp =
       dyn_cast_or_null<DequantizeCastOp>(inputValue->getDefiningOp());
   if (inputDcastOp && inputDcastOp.arg()->getType() == newType) {
@@ -228,7 +228,7 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
     break;
   }
 
-  for (Value *removeValueIfDead : removeValuesIfDead) {
+  for (ValuePtr removeValueIfDead : removeValuesIfDead) {
     if (removeValueIfDead->use_empty()) {
      removeValueIfDead->getDefiningOp()->erase();
    }
@@ -237,12 +237,12 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
 
 void InferQuantizedTypesPass::transformResultType(CAGResultAnchor *anchor,
                                                   Type newType) {
-  Value *origResultValue = anchor->getValue();
+  ValuePtr origResultValue = anchor->getValue();
   Operation *op = origResultValue->getDefiningOp();
   OpBuilder b(op->getBlock(), ++Block::iterator(op));
 
-  Value *replacedResultValue = nullptr;
-  Value *newResultValue = nullptr;
+  ValuePtr replacedResultValue = nullptr;
+  ValuePtr newResultValue = nullptr;
   switch (anchor->getTypeTransformRule()) {
   case CAGAnchorNode::TypeTransformRule::Direct:
     origResultValue->setType(newType);

