[Mlir-commits] [mlir] cd392c0 - [mlir][Linalg] NFC - Make more option names consistent.

Nicolas Vasilache llvmlistbot at llvm.org
Thu Oct 28 12:49:01 PDT 2021


Author: Nicolas Vasilache
Date: 2021-10-28T19:48:57Z
New Revision: cd392c0e9e2af95c38071dbcbc5e12b1265aac00

URL: https://github.com/llvm/llvm-project/commit/cd392c0e9e2af95c38071dbcbc5e12b1265aac00
DIFF: https://github.com/llvm/llvm-project/commit/cd392c0e9e2af95c38071dbcbc5e12b1265aac00.diff

LOG: [mlir][Linalg] NFC - Make more option names consistent.

Drop the redundant "enable" prefix from the C++ option members of the
ConvertVectorToLLVM pass and of LowerVectorToLLVMOptions (the
command-line flag spellings such as "enable-amx" are unchanged), and
turn the setEnableXXX setters into chainable enableXXX methods that
default to true.

Differential Revision: https://reviews.llvm.org/D112640
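
For illustration, a minimal sketch of how the renamed chainable setters
read after this change; createConvertVectorToLLVMPass is the pass
factory declared alongside these options in ConvertVectorToLLVM.h, and
the particular option choices here are arbitrary:

  #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

  // Calling a setter with no argument enables the feature (the new
  // default of true); an explicit bool is still accepted.
  mlir::LowerVectorToLLVMOptions options;
  options.enableIndexOptimizations()
      .enableArmNeon()
      .enableX86Vector(/*b=*/false);
  auto pass = mlir::createConvertVectorToLLVMPass(options);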

Added: 
    

Modified: 
    mlir/include/mlir/Conversion/Passes.td
    mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
    mlir/include/mlir/Dialect/Vector/VectorOps.h
    mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
    mlir/lib/Dialect/Vector/VectorTransforms.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index bab88b804be3..8d806649d901 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -700,23 +700,23 @@ def ConvertVectorToLLVM : Pass<"convert-vector-to-llvm", "ModuleOp"> {
     Option<"reassociateFPReductions", "reassociate-fp-reductions",
            "bool", /*default=*/"false",
            "Allows llvm to reassociate floating-point reductions for speed">,
-    Option<"enableIndexOptimizations", "enable-index-optimizations",
+    Option<"indexOptimizations", "enable-index-optimizations",
            "bool", /*default=*/"true",
            "Allows compiler to assume indices fit in 32-bit if that yields "
 	   "faster code">,
-    Option<"enableAMX", "enable-amx",
+    Option<"amx", "enable-amx",
            "bool", /*default=*/"false",
            "Enables the use of AMX dialect while lowering the vector "
 	   "dialect.">,
-    Option<"enableArmNeon", "enable-arm-neon",
+    Option<"armNeon", "enable-arm-neon",
            "bool", /*default=*/"false",
            "Enables the use of ArmNeon dialect while lowering the vector "
 	   "dialect.">,
-    Option<"enableArmSVE", "enable-arm-sve",
+    Option<"armSVE", "enable-arm-sve",
            "bool", /*default=*/"false",
            "Enables the use of ArmSVE dialect while lowering the vector "
        "dialect.">,
-    Option<"enableX86Vector", "enable-x86vector",
+    Option<"x86Vector", "enable-x86vector",
            "bool", /*default=*/"false",
            "Enables the use of X86Vector dialect while lowering the vector "
 	   "dialect.">

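As a usage note, the user-visible pass flags above keep their "enable-"
spellings, so an mlir-opt invocation like the following (input.mlir is a
placeholder) is unaffected by the rename:

  mlir-opt --convert-vector-to-llvm="enable-index-optimizations=1 enable-arm-neon=1" input.mlir
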
diff --git a/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h b/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
index f721870cb338..84ca14fab48a 100644
--- a/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
+++ b/mlir/include/mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h
@@ -22,41 +22,40 @@ class OperationPass;
 /// ConvertVectorToLLVM pass in include/mlir/Conversion/Passes.td
 struct LowerVectorToLLVMOptions {
   LowerVectorToLLVMOptions()
-      : reassociateFPReductions(false), enableIndexOptimizations(true),
-        enableArmNeon(false), enableArmSVE(false), enableAMX(false),
-        enableX86Vector(false) {}
+      : reassociateFPReductions(false), indexOptimizations(true),
+        armNeon(false), armSVE(false), amx(false), x86Vector(false) {}
 
-  LowerVectorToLLVMOptions &setReassociateFPReductions(bool b) {
+  LowerVectorToLLVMOptions &enableReassociateFPReductions(bool b = true) {
     reassociateFPReductions = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableIndexOptimizations(bool b) {
-    enableIndexOptimizations = b;
+  LowerVectorToLLVMOptions &enableIndexOptimizations(bool b = true) {
+    indexOptimizations = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableArmNeon(bool b) {
-    enableArmNeon = b;
+  LowerVectorToLLVMOptions &enableArmNeon(bool b = true) {
+    armNeon = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableArmSVE(bool b) {
-    enableArmSVE = b;
+  LowerVectorToLLVMOptions &enableArmSVE(bool b = true) {
+    armSVE = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableAMX(bool b) {
-    enableAMX = b;
+  LowerVectorToLLVMOptions &enableAMX(bool b = true) {
+    amx = b;
     return *this;
   }
-  LowerVectorToLLVMOptions &setEnableX86Vector(bool b) {
-    enableX86Vector = b;
+  LowerVectorToLLVMOptions &enableX86Vector(bool b = true) {
+    x86Vector = b;
     return *this;
   }
 
   bool reassociateFPReductions;
-  bool enableIndexOptimizations;
-  bool enableArmNeon;
-  bool enableArmSVE;
-  bool enableAMX;
-  bool enableX86Vector;
+  bool indexOptimizations;
+  bool armNeon;
+  bool armSVE;
+  bool amx;
+  bool x86Vector;
 };
 
 /// Collect a set of patterns to convert from Vector contractions to LLVM Matrix

diff --git a/mlir/include/mlir/Dialect/Vector/VectorOps.h b/mlir/include/mlir/Dialect/Vector/VectorOps.h
index a29683431344..39d8a1917a20 100644
--- a/mlir/include/mlir/Dialect/Vector/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/VectorOps.h
@@ -83,7 +83,7 @@ void populateVectorTransferLoweringPatterns(
 
 /// These patterns materialize masks for various vector ops such as transfers.
 void populateVectorMaskMaterializationPatterns(RewritePatternSet &patterns,
-                                               bool enableIndexOptimizations);
+                                               bool indexOptimizations);
 
 /// Collect a set of patterns to propagate insert_map/extract_map in the ssa
 /// chain.

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
index 7d75f11d0e3d..7dbd28223b4b 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -34,24 +34,24 @@ struct LowerVectorToLLVMPass
     : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
   LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
     this->reassociateFPReductions = options.reassociateFPReductions;
-    this->enableIndexOptimizations = options.enableIndexOptimizations;
-    this->enableArmNeon = options.enableArmNeon;
-    this->enableArmSVE = options.enableArmSVE;
-    this->enableAMX = options.enableAMX;
-    this->enableX86Vector = options.enableX86Vector;
+    this->indexOptimizations = options.indexOptimizations;
+    this->armNeon = options.armNeon;
+    this->armSVE = options.armSVE;
+    this->amx = options.amx;
+    this->x86Vector = options.x86Vector;
   }
   // Override explicitly to allow conditional dialect dependence.
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<LLVM::LLVMDialect>();
     registry.insert<arith::ArithmeticDialect>();
     registry.insert<memref::MemRefDialect>();
-    if (enableArmNeon)
+    if (armNeon)
       registry.insert<arm_neon::ArmNeonDialect>();
-    if (enableArmSVE)
+    if (armSVE)
       registry.insert<arm_sve::ArmSVEDialect>();
-    if (enableAMX)
+    if (amx)
       registry.insert<amx::AMXDialect>();
-    if (enableX86Vector)
+    if (x86Vector)
       registry.insert<x86vector::X86VectorDialect>();
   }
   void runOnOperation() override;
@@ -77,7 +77,7 @@ void LowerVectorToLLVMPass::runOnOperation() {
   // Convert to the LLVM IR dialect.
   LLVMTypeConverter converter(&getContext());
   RewritePatternSet patterns(&getContext());
-  populateVectorMaskMaterializationPatterns(patterns, enableIndexOptimizations);
+  populateVectorMaskMaterializationPatterns(patterns, indexOptimizations);
   populateVectorTransferLoweringPatterns(patterns);
   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
   populateVectorToLLVMConversionPatterns(converter, patterns,
@@ -90,21 +90,21 @@ void LowerVectorToLLVMPass::runOnOperation() {
   target.addLegalDialect<memref::MemRefDialect>();
   target.addLegalDialect<StandardOpsDialect>();
   target.addLegalOp<UnrealizedConversionCastOp>();
-  if (enableArmNeon) {
+  if (armNeon) {
     // TODO: we may or may not want to include in-dialect lowering to
     // LLVM-compatible operations here. So far, all operations in the dialect
     // can be translated to LLVM IR so there is no conversion necessary.
     target.addLegalDialect<arm_neon::ArmNeonDialect>();
   }
-  if (enableArmSVE) {
+  if (armSVE) {
     configureArmSVELegalizeForExportTarget(target);
     populateArmSVELegalizeForLLVMExportPatterns(converter, patterns);
   }
-  if (enableAMX) {
+  if (amx) {
     configureAMXLegalizeForExportTarget(target);
     populateAMXLegalizeForLLVMExportPatterns(converter, patterns);
   }
-  if (enableX86Vector) {
+  if (x86Vector) {
     configureX86VectorLegalizeForExportTarget(target);
     populateX86VectorLegalizeForLLVMExportPatterns(converter, patterns);
   }

diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
index efb22b9ab6be..cabeb58971d0 100644
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -3402,7 +3402,7 @@ static Value createCastToIndexLike(PatternRewriter &rewriter, Location loc,
 //       generates more elaborate instructions for this intrinsic since it
 //       is very conservative on the boundary conditions.
 static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
-                                   bool enableIndexOptimizations, int64_t dim,
+                                   bool indexOptimizations, int64_t dim,
                                    Value b, Value *off = nullptr) {
   auto loc = op->getLoc();
   // If we can assume all indices fit in 32-bit, we perform the vector
@@ -3410,7 +3410,7 @@ static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
   // Otherwise we perform the vector comparison using 64-bit indices.
   Value indices;
   Type idxType;
-  if (enableIndexOptimizations) {
+  if (indexOptimizations) {
     indices = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getI32VectorAttr(
                  llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
@@ -3439,7 +3439,7 @@ struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
 public:
   explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt)
       : mlir::OpRewritePattern<ConcreteOp>(context),
-        enableIndexOptimizations(enableIndexOpt) {}
+        indexOptimizations(enableIndexOpt) {}
 
   LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                 PatternRewriter &rewriter) const override {
@@ -3466,8 +3466,8 @@ struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
     Value off = xferOp.indices()[lastIndex];
     Value dim =
         vector::createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex);
-    Value mask = buildVectorComparison(
-        rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);
+    Value mask = buildVectorComparison(rewriter, xferOp, indexOptimizations,
+                                       vecWidth, dim, &off);
 
     if (xferOp.mask()) {
       // Intersect the in-bounds with the mask specified as an op parameter.
@@ -3483,7 +3483,7 @@ struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
   }
 
 private:
-  const bool enableIndexOptimizations;
+  const bool indexOptimizations;
 };
 
 /// Conversion pattern for a vector.create_mask (1-D only).
@@ -3493,7 +3493,7 @@ class VectorCreateMaskOpConversion
   explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                         bool enableIndexOpt)
       : mlir::OpRewritePattern<vector::CreateMaskOp>(context),
-        enableIndexOptimizations(enableIndexOpt) {}
+        indexOptimizations(enableIndexOpt) {}
 
   LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                 PatternRewriter &rewriter) const override {
@@ -3501,7 +3501,7 @@ class VectorCreateMaskOpConversion
     int64_t rank = dstType.getRank();
     if (rank == 1) {
       rewriter.replaceOp(
-          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
+          op, buildVectorComparison(rewriter, op, indexOptimizations,
                                     dstType.getDimSize(0), op.getOperand(0)));
       return success();
     }
@@ -3509,7 +3509,7 @@ class VectorCreateMaskOpConversion
   }
 
 private:
-  const bool enableIndexOptimizations;
+  const bool indexOptimizations;
 };
 
 // Drop inner most contiguous unit dimensions from transfer_read operand.
@@ -3587,11 +3587,11 @@ class DropInnerMostUnitDims : public OpRewritePattern<vector::TransferReadOp> {
 };
 
 void mlir::vector::populateVectorMaskMaterializationPatterns(
-    RewritePatternSet &patterns, bool enableIndexOptimizations) {
+    RewritePatternSet &patterns, bool indexOptimizations) {
   patterns.add<VectorCreateMaskOpConversion,
                MaterializeTransferMask<vector::TransferReadOp>,
                MaterializeTransferMask<vector::TransferWriteOp>>(
-      patterns.getContext(), enableIndexOptimizations);
+      patterns.getContext(), indexOptimizations);
 }
 
 void mlir::vector::populatePropagateVectorDistributionPatterns(

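For downstream users of the renamed entry point, a hedged sketch of
populating these patterns from a custom pass; applyPatternsAndFoldGreedily
is the standard greedy rewrite driver, and the surrounding pass
boilerplate is omitted:

  #include "mlir/Dialect/Vector/VectorOps.h"
  #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

  // Inside some Pass::runOnOperation():
  RewritePatternSet patterns(&getContext());
  vector::populateVectorMaskMaterializationPatterns(
      patterns, /*indexOptimizations=*/true);
  (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));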