[Mlir-commits] [mlir] bb56c2b - Fix clang-tidy issues in mlir/ (NFC)

Mehdi Amini <llvmlistbot at llvm.org>
Mon Dec 20 16:58:27 PST 2021


Author: Mehdi Amini
Date: 2021-12-21T00:50:07Z
New Revision: bb56c2b3669591f1e46d9ae0566b8203ba7cb93d

URL: https://github.com/llvm/llvm-project/commit/bb56c2b3669591f1e46d9ae0566b8203ba7cb93d
DIFF: https://github.com/llvm/llvm-project/commit/bb56c2b3669591f1e46d9ae0566b8203ba7cb93d.diff

LOG: Fix clang-tidy issues in mlir/ (NFC)

Differential Revision: https://reviews.llvm.org/D115956

Added: 
    

Modified: 
    mlir/lib/Bindings/Python/IRAttributes.cpp
    mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
    mlir/lib/Dialect/Shape/IR/Shape.cpp
    mlir/lib/ExecutionEngine/SparseTensorUtils.cpp

Removed: 
    


################################################################################
diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp
index eed6369acb2fb..f1206617d0690 100644
--- a/mlir/lib/Bindings/Python/IRAttributes.cpp
+++ b/mlir/lib/Bindings/Python/IRAttributes.cpp
@@ -513,11 +513,13 @@ class PyDenseElementsAttribute
     if (mlirTypeIsAF64(elementType)) {
       // f64
       return bufferInfo<double>(shapedType);
-    } else if (mlirTypeIsAF16(elementType)) {
+    }
+    if (mlirTypeIsAF16(elementType)) {
       // f16
       return bufferInfo<uint16_t>(shapedType, "e");
-    } else if (mlirTypeIsAInteger(elementType) &&
-               mlirIntegerTypeGetWidth(elementType) == 32) {
+    }
+    if (mlirTypeIsAInteger(elementType) &&
+        mlirIntegerTypeGetWidth(elementType) == 32) {
       if (mlirIntegerTypeIsSignless(elementType) ||
           mlirIntegerTypeIsSigned(elementType)) {
         // i32

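The hunk above is an instance of clang-tidy's readability-else-after-return check: when every branch of an if/else chain returns, the `else` keywords are redundant and the chain can be flattened into sibling `if`s with early returns. A minimal self-contained sketch of the pattern (the names Kind and classify are illustrative, not from this patch):

enum class Kind { Negative, Zero, Positive };

// Flattened form: each condition returns, so no `else` is needed.
// readability-else-after-return suggests exactly this shape.
Kind classify(int x) {
  if (x < 0)
    return Kind::Negative;
  if (x == 0)
    return Kind::Zero;
  return Kind::Positive;
}

The same flattening is applied in the QuantTypes.cpp and Shape.cpp hunks below; the SparseTensorUtils.cpp hunks additionally rename snake_case variables to camelCase, which matches clang-tidy's readability-identifier-naming check under the LLVM conventions.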
diff --git a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
index a17b77dea2f35..43f538b62a236 100644
--- a/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
+++ b/mlir/lib/Dialect/Quant/IR/QuantTypes.cpp
@@ -113,7 +113,8 @@ Type QuantizedType::castFromStorageType(Type candidateType) {
   if (candidateType.isa<UnrankedTensorType>()) {
     // i.e. tensor<i8> -> tensor<!quant<"uniform[i8:f32]{1.0}">>
     return UnrankedTensorType::get(getStorageType());
-  } else if (candidateType.isa<VectorType>()) {
+  }
+  if (candidateType.isa<VectorType>()) {
     // i.e. tensor<4xi8> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
     return VectorType::get(candidateType.cast<VectorType>().getShape(),
                            getStorageType());
@@ -140,7 +141,8 @@ Type QuantizedType::castToStorageType(Type quantizedType) {
     }
     if (quantizedType.isa<UnrankedTensorType>()) {
       return UnrankedTensorType::get(storageType);
-    } else if (quantizedType.isa<VectorType>()) {
+    }
+    if (quantizedType.isa<VectorType>()) {
       return VectorType::get(sType.getShape(), storageType);
     }
   }
@@ -166,7 +168,8 @@ Type QuantizedType::castFromExpressedType(Type candidateType) {
     if (candidateType.isa<UnrankedTensorType>()) {
       // i.e. tensor<xf32> -> tensor<x!quant<"uniform[i8:f32]{1.0}">>
       return UnrankedTensorType::get(*this);
-    } else if (candidateType.isa<VectorType>()) {
+    }
+    if (candidateType.isa<VectorType>()) {
       // i.e. tensor<4xf32> -> tensor<4x!quant<"uniform[i8:f32]{1.0}">>
       return VectorType::get(candidateShapedType.getShape(), *this);
     }
@@ -193,7 +196,8 @@ Type QuantizedType::castToExpressedType(Type quantizedType) {
     }
     if (quantizedType.isa<UnrankedTensorType>()) {
       return UnrankedTensorType::get(expressedType);
-    } else if (quantizedType.isa<VectorType>()) {
+    }
+    if (quantizedType.isa<VectorType>()) {
       return VectorType::get(sType.getShape(), expressedType);
     }
   }

diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 80c46a13b7611..db86e22886734 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -54,13 +54,13 @@ LogicalResult shape::getShapeVec(Value input,
   if (auto inputOp = input.getDefiningOp<ConstShapeOp>()) {
     shapeValues = llvm::to_vector<6>(inputOp.getShape().getValues<int64_t>());
     return success();
-  } else if (auto inputOp = input.getDefiningOp<arith::ConstantOp>()) {
+  }
+  if (auto inputOp = input.getDefiningOp<arith::ConstantOp>()) {
     shapeValues = llvm::to_vector<6>(
         inputOp.getValue().cast<DenseIntElementsAttr>().getValues<int64_t>());
     return success();
-  } else {
-    return failure();
   }
+  return failure();
 }
 
 static bool isErrorPropagationPossible(TypeRange operandTypes) {

diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
index 364d49d37bdce..99d467e34e2cd 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -550,7 +550,7 @@ static char *toLower(char *token) {
 
 /// Read the MME header of a general sparse matrix of type real.
 static void readMMEHeader(FILE *file, char *filename, char *line,
-                          uint64_t *idata, bool *is_symmetric) {
+                          uint64_t *idata, bool *isSymmetric) {
   char header[64];
   char object[64];
   char format[64];
@@ -562,12 +562,12 @@ static void readMMEHeader(FILE *file, char *filename, char *line,
     fprintf(stderr, "Corrupt header in %s\n", filename);
     exit(1);
   }
-  *is_symmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
+  *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
   // Make sure this is a general sparse matrix.
   if (strcmp(toLower(header), "%%matrixmarket") ||
       strcmp(toLower(object), "matrix") ||
       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
-      (strcmp(toLower(symmetry), "general") && !(*is_symmetric))) {
+      (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) {
     fprintf(stderr,
             "Cannot find a general sparse matrix with type real in %s\n",
             filename);
@@ -636,9 +636,9 @@ static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
   // Perform some file format dependent set up.
   char line[kColWidth];
   uint64_t idata[512];
-  bool is_symmetric = false;
+  bool isSymmetric = false;
   if (strstr(filename, ".mtx")) {
-    readMMEHeader(file, filename, line, idata, &is_symmetric);
+    readMMEHeader(file, filename, line, idata, &isSymmetric);
   } else if (strstr(filename, ".tns")) {
     readExtFROSTTHeader(file, filename, line, idata);
   } else {
@@ -674,7 +674,7 @@ static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
     // We currently chose to deal with symmetric matrices by fully constructing
     // them. In the future, we may want to make symmetry implicit for storage
     // reasons.
-    if (is_symmetric && indices[0] != indices[1])
+    if (isSymmetric && indices[0] != indices[1])
       tensor->add({indices[1], indices[0]}, value);
   }
   // Close the file and return tensor.
@@ -718,17 +718,16 @@ typedef uint64_t index_t;
       }                                                                        \
       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm,  \
                                                            sparsity, tensor);  \
-    } else if (action == Action::kEmptyCOO) {                                  \
+    }                                                                          \
+    if (action == Action::kEmptyCOO)                                           \
       return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);        \
+    tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);    \
+    if (action == Action::kToIterator) {                                       \
+      tensor->startIterator();                                                 \
     } else {                                                                   \
-      tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);  \
-      if (action == Action::kToIterator) {                                     \
-        tensor->startIterator();                                               \
-      } else {                                                                 \
-        assert(action == Action::kToCOO);                                      \
-      }                                                                        \
-      return tensor;                                                           \
+      assert(action == Action::kToCOO);                                        \
     }                                                                          \
+    return tensor;                                                             \
   }
 
 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
@@ -1092,15 +1091,15 @@ void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
 //
 //  TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
 //
-void convertFromMLIRSparseTensor(void *tensor, uint64_t *p_rank,
-                                 uint64_t *p_nse, uint64_t **p_shape,
-                                 double **p_values, uint64_t **p_indices) {
-  SparseTensorStorage<uint64_t, uint64_t, double> *sparse_tensor =
+void convertFromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
+                                 uint64_t **pShape, double **pValues,
+                                 uint64_t **pIndices) {
+  SparseTensorStorage<uint64_t, uint64_t, double> *sparseTensor =
       static_cast<SparseTensorStorage<uint64_t, uint64_t, double> *>(tensor);
-  uint64_t rank = sparse_tensor->getRank();
+  uint64_t rank = sparseTensor->getRank();
   std::vector<uint64_t> perm(rank);
   std::iota(perm.begin(), perm.end(), 0);
-  SparseTensorCOO<double> *coo = sparse_tensor->toCOO(perm.data());
+  SparseTensorCOO<double> *coo = sparseTensor->toCOO(perm.data());
 
   const std::vector<Element<double>> &elements = coo->getElements();
   uint64_t nse = elements.size();
@@ -1120,11 +1119,11 @@ void convertFromMLIRSparseTensor(void *tensor, uint64_t *p_rank,
   }
 
   delete coo;
-  *p_rank = rank;
-  *p_nse = nse;
-  *p_shape = shape;
-  *p_values = values;
-  *p_indices = indices;
+  *pRank = rank;
+  *pNse = nse;
+  *pShape = shape;
+  *pValues = values;
+  *pIndices = indices;
 }
 } // extern "C"
 
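The CASE macro hunk above is the least readable part of the patch because of the backslash continuations. As a rough, self-contained sketch (simplified stand-in types and helpers, not the real SparseTensorUtils.cpp API, which is templated over pointer, index, and value types P, I, V), the restructured body now follows this early-return shape:

#include <cassert>

// Illustrative stand-ins for the runtime types used by the macro.
enum class Action { kFromCOO, kEmptyCOO, kToCOO, kToIterator };
struct COO { void startIterator() {} };
struct Storage { COO *toCOO() { return new COO(); } };

void *dispatch(Action action, Storage *ptr) {
  if (action == Action::kFromCOO)
    return new Storage(); // was: SparseTensorStorage<P, I, V>::newSparseTensor(...)
  if (action == Action::kEmptyCOO)
    return new COO();     // was: SparseTensorCOO<V>::newSparseTensorCOO(...)
  // The remaining actions share the conversion back to COO form.
  COO *tensor = ptr->toCOO();
  if (action == Action::kToIterator)
    tensor->startIterator();
  else
    assert(action == Action::kToCOO);
  return tensor;
}

Compared with the old nesting, the kEmptyCOO case exits early and the final return is shared, which is what lets the macro drop two levels of braces.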