[Mlir-commits] [mlir] 65066c0 - [mlir] Use `create` instead of `createOrFold` for ConstantOp as folding has no effect (NFC) (#80129)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed Jan 31 23:40:41 PST 2024


Author: Hugo Trachino
Date: 2024-01-31T23:40:37-08:00
New Revision: 65066c02770cc3da3b5154fbb7ed9df78ab94b93

URL: https://github.com/llvm/llvm-project/commit/65066c02770cc3da3b5154fbb7ed9df78ab94b93
DIFF: https://github.com/llvm/llvm-project/commit/65066c02770cc3da3b5154fbb7ed9df78ab94b93.diff

LOG: [mlir] Use `create` instead of `createOrFold` for ConstantOp as folding has no effect (NFC) (#80129)

This aims to clean up confusing uses of
builder.createOrFold<ConstantOp>, since folding a just-created constant has no effect.
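
For illustration, a minimal sketch (not code from this commit) of the two builder calls this change swaps; `rewriter`, `loc`, and the index value are placeholders matching the hunks below:

    // Both calls yield the same constant op: the fold attempted by
    // createOrFold cannot simplify a constant that was just built.
    Value viaCreateOrFold =
        rewriter.createOrFold<arith::ConstantIndexOp>(loc, 0);
    // Preferred spelling after this change; same result, clearer intent.
    Value viaCreate = rewriter.create<arith::ConstantIndexOp>(loc, 0);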

Added: 
    

Modified: 
    mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
    mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
    mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
    mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
    mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index cc97ee74d48b6..12d2462061dcf 100644
--- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
+++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -38,7 +38,7 @@ static Value createI32Constant(ConversionPatternRewriter &rewriter,
 static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc,
                               bool value) {
   Type llvmI1 = rewriter.getI1Type();
-  return rewriter.createOrFold<LLVM::ConstantOp>(loc, llvmI1, value);
+  return rewriter.create<LLVM::ConstantOp>(loc, llvmI1, value);
 }
 
 namespace {
@@ -163,7 +163,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
     Value ptr = memrefDescriptor.alignedPtr(rewriter, loc);
     // The stride value is always 0 for raw buffers. This also disables
     // swizzling.
-    Value stride = rewriter.createOrFold<LLVM::ConstantOp>(
+    Value stride = rewriter.create<LLVM::ConstantOp>(
         loc, llvmI16, rewriter.getI16IntegerAttr(0));
     Value numRecords;
     if (memrefType.hasStaticShape()) {

diff --git a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
index c625a302a3970..b51a13ae362e9 100644
--- a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
+++ b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
@@ -89,7 +89,7 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
   }
   VectorType inType = in.getType().cast<VectorType>();
   int64_t numElements = inType.getNumElements();
-  Value zero = rewriter.createOrFold<arith::ConstantOp>(
+  Value zero = rewriter.create<arith::ConstantOp>(
       loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
   Value result =
       rewriter.createOrFold<vector::SplatOp>(loc, op.getOut().getType(), zero);
@@ -209,7 +209,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
   }
   VectorType outType = op.getOut().getType().cast<VectorType>();
   int64_t numElements = outType.getNumElements();
-  Value zero = rewriter.createOrFold<arith::ConstantOp>(
+  Value zero = rewriter.create<arith::ConstantOp>(
       loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
   Value result = rewriter.createOrFold<vector::SplatOp>(loc, outType, zero);
   if (outType.getShape().empty()) {

diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 599bb13190f12..363e6016113b1 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -67,8 +67,8 @@ static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
 Value getLaneId(ConversionPatternRewriter &rewriter, Location loc,
                 const unsigned indexBitwidth) {
   auto int32Type = IntegerType::get(rewriter.getContext(), 32);
-  Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
-  Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+  Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+  Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
   Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, int32Type,
                                                     ValueRange{minus1, zero});
   Value laneId = rewriter.create<ROCDL::MbcntHiOp>(loc, int32Type,
@@ -89,8 +89,8 @@ struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
     // followed by: %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo)
 
     Type intTy = IntegerType::get(context, 32);
-    Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
-    Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+    Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+    Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
     Value mbcntLo =
         rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero});
     Value laneId = rewriter.create<ROCDL::MbcntHiOp>(

diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 06ec53d19b1e9..505d85f211111 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -327,7 +327,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
     highValues.reserve(rank);
 
     for (int i = 0; i < rank; i++) {
-      Value inputIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i);
+      Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i);
       Value lowVal = rewriter.createOrFold<tensor::ExtractOp>(
           loc, padding, ValueRange({inputIndex, lowIndex}));
       Value highVal = rewriter.createOrFold<tensor::ExtractOp>(
@@ -360,8 +360,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
 
     Location loc = op.getLoc();
     int axis = op.getAxis();
-    Value axisValue = rewriter.createOrFold<arith::ConstantOp>(
-        loc, rewriter.getIndexAttr(axis));
+    Value axisValue =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(axis));
     int64_t rank = resultType.getRank();
 
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));

diff --git a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
index 2108fc591055a..7c8403c9609d8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
@@ -44,8 +44,8 @@ struct DecomposeTensorConcatOp : public OpRewritePattern<ConcatOp> {
       return failure();
 
     int64_t dim = concatOp.getDim();
-    Value dimValue = rewriter.createOrFold<arith::ConstantOp>(
-        loc, rewriter.getIndexAttr(dim));
+    Value dimValue =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(dim));
 
     int64_t rank = concatOp.getResultType().getRank();
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));

