[Mlir-commits] [mlir] Switch member calls to `isa/dyn_cast/cast/...` to free function calls. (PR #89356)

llvmlistbot at llvm.org
Fri Apr 19 02:13:45 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-backend-amdgpu
@llvm/pr-subscribers-mlir-sparse
@llvm/pr-subscribers-mlir-memref
@llvm/pr-subscribers-mlir-tensor

Author: Christian Sigg (chsigg)

Changes:

This change cleans up call sites. The next step is to mark the member functions as deprecated.

See https://mlir.llvm.org/deprecation and https://discourse.llvm.org/t/preferred-casting-style-going-forward.
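
For context, a minimal sketch of the style change this patch applies throughout; `value` is a hypothetical `mlir::Value`, and the pattern mirrors the hunks below:

```cpp
// Old style: member-function casts on Type/Attribute/Value, slated for deprecation.
auto oldStyle = value.getType().dyn_cast<VectorType>();

// New style: free-function casts, as preferred going forward.
auto newStyle = dyn_cast<VectorType>(value.getType());
```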

---

The patch is 108.29 KiB and is truncated to 20.00 KiB below; the full version is at https://github.com/llvm/llvm-project/pull/89356.diff


80 Files Affected:

- (modified) mlir/examples/transform/Ch4/lib/MyExtension.cpp (+1-1) 
- (modified) mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterfaceImpl.h (+3-3) 
- (modified) mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h (+3-3) 
- (modified) mlir/include/mlir/IR/Location.h (+1-1) 
- (modified) mlir/lib/CAPI/Dialect/LLVM.cpp (+3-3) 
- (modified) mlir/lib/CAPI/IR/BuiltinTypes.cpp (+2-2) 
- (modified) mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp (+5-6) 
- (modified) mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp (+6-6) 
- (modified) mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp (+3-4) 
- (modified) mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp (+1-1) 
- (modified) mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp (+7-11) 
- (modified) mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp (+9-11) 
- (modified) mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp (+2-2) 
- (modified) mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (+1-2) 
- (modified) mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp (+1-1) 
- (modified) mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp (+7-7) 
- (modified) mlir/lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Complex/IR/ComplexOps.cpp (+1-1) 
- (modified) mlir/lib/Dialect/ControlFlow/Transforms/BufferDeallocationOpInterfaceImpl.cpp (+1-1) 
- (modified) mlir/lib/Dialect/EmitC/IR/EmitC.cpp (+10-10) 
- (modified) mlir/lib/Dialect/GPU/TransformOps/GPUTransformOps.cpp (+3-3) 
- (modified) mlir/lib/Dialect/IRDL/IRDLLoading.cpp (+2-2) 
- (modified) mlir/lib/Dialect/LLVMIR/IR/BasicPtxBuilderInterface.cpp (+1-1) 
- (modified) mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp (+5-5) 
- (modified) mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp (+4-6) 
- (modified) mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp (+14-14) 
- (modified) mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp (+4-5) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/EliminateEmptyTensors.cpp (+1-2) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp (+2-2) 
- (modified) mlir/lib/Dialect/MemRef/TransformOps/MemRefTransformOps.cpp (+2-2) 
- (modified) mlir/lib/Dialect/MemRef/Transforms/EmulateNarrowType.cpp (+2-2) 
- (modified) mlir/lib/Dialect/MemRef/Transforms/ExpandRealloc.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Mesh/IR/MeshOps.cpp (+10-9) 
- (modified) mlir/lib/Dialect/Mesh/Interfaces/ShardingInterface.cpp (+5-5) 
- (modified) mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp (+29-37) 
- (modified) mlir/lib/Dialect/Mesh/Transforms/Transforms.cpp (+4-5) 
- (modified) mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp (+4-4) 
- (modified) mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/IR/Detail/Var.cpp (+2-2) 
- (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorInterfaces.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseAssembler.cpp (+2-2) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseReinterpretMap.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp (+3-3) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp (+1-1) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp (+1-2) 
- (modified) mlir/lib/Dialect/Tensor/IR/TensorOps.cpp (+3-4) 
- (modified) mlir/lib/Dialect/Tosa/IR/ShardingInterfaceImpl.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Tosa/IR/TosaOps.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Transform/DebugExtension/DebugExtensionOps.cpp (+3-4) 
- (modified) mlir/lib/Dialect/Transform/IR/TransformOps.cpp (+3-3) 
- (modified) mlir/lib/Dialect/Transform/IR/TransformTypes.cpp (+2-2) 
- (modified) mlir/lib/Dialect/Vector/IR/VectorOps.cpp (+1-2) 
- (modified) mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp (+1-1) 
- (modified) mlir/lib/Dialect/Vector/Transforms/VectorTransferSplitRewritePatterns.cpp (+1-1) 
- (modified) mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp (+1-1) 
- (modified) mlir/lib/IR/AffineMap.cpp (+2-2) 
- (modified) mlir/lib/IR/Operation.cpp (+1-1) 
- (modified) mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp (+11-14) 
- (modified) mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp (+2-2) 
- (modified) mlir/test/lib/Conversion/MathToVCIX/TestMathToVCIXConversion.cpp (+2-2) 
- (modified) mlir/test/lib/Dialect/Mesh/TestReshardingSpmdization.cpp (+2-3) 
- (modified) mlir/test/lib/Dialect/Test/TestToLLVMIRTranslation.cpp (+1-1) 
- (modified) mlir/test/lib/IR/TestAffineWalk.cpp (+1-1) 
- (modified) mlir/test/lib/IR/TestBuiltinAttributeInterfaces.cpp (+1-1) 
- (modified) mlir/test/lib/Rewrite/TestPDLByteCode.cpp (+1-1) 


``````````diff
diff --git a/mlir/examples/transform/Ch4/lib/MyExtension.cpp b/mlir/examples/transform/Ch4/lib/MyExtension.cpp
index 26e348f2a30ec6..83e2dcd750bb39 100644
--- a/mlir/examples/transform/Ch4/lib/MyExtension.cpp
+++ b/mlir/examples/transform/Ch4/lib/MyExtension.cpp
@@ -142,7 +142,7 @@ mlir::transform::HasOperandSatisfyingOp::apply(
     transform::detail::prepareValueMappings(
         yieldedMappings, getBody().front().getTerminator()->getOperands(),
         state);
-    results.setParams(getPosition().cast<OpResult>(),
+    results.setParams(cast<OpResult>(getPosition()),
                       {rewriter.getI32IntegerAttr(operand.getOperandNumber())});
     for (auto &&[result, mapping] : llvm::zip(getResults(), yieldedMappings))
       results.setMappedValues(result, mapping);
diff --git a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterfaceImpl.h b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterfaceImpl.h
index ab4df2ab028d43..5e4b4f3a66af9d 100644
--- a/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterfaceImpl.h
+++ b/mlir/include/mlir/Dialect/Mesh/Interfaces/ShardingInterfaceImpl.h
@@ -87,7 +87,7 @@ struct IndependentParallelIteratorDomainShardingInterface
   void
   populateIteratorTypes(Type t,
                         SmallVector<utils::IteratorType> &iterTypes) const {
-    RankedTensorType rankedTensorType = t.dyn_cast<RankedTensorType>();
+    RankedTensorType rankedTensorType = dyn_cast<RankedTensorType>(t);
     if (!rankedTensorType) {
       return;
     }
@@ -106,7 +106,7 @@ struct ElementwiseShardingInterface
           ElementwiseShardingInterface<ElemwiseOp>, ElemwiseOp> {
   SmallVector<utils::IteratorType> getLoopIteratorTypes(Operation *op) const {
     Value val = op->getOperand(0);
-    auto type = val.getType().dyn_cast<RankedTensorType>();
+    auto type = dyn_cast<RankedTensorType>(val.getType());
     if (!type)
       return {};
     SmallVector<utils::IteratorType> types(type.getRank(),
@@ -117,7 +117,7 @@ struct ElementwiseShardingInterface
   SmallVector<AffineMap> getIndexingMaps(Operation *op) const {
     MLIRContext *ctx = op->getContext();
     Value val = op->getOperand(0);
-    auto type = val.getType().dyn_cast<RankedTensorType>();
+    auto type = dyn_cast<RankedTensorType>(val.getType());
     if (!type)
       return {};
     int64_t rank = type.getRank();
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
index a9bc3351f4cff0..ec3c2cb011c357 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
@@ -60,11 +60,11 @@ class MulOperandsAndResultElementType
     if (llvm::isa<FloatType>(resElemType))
       return impl::verifySameOperandsAndResultElementType(op);
 
-    if (auto resIntType = resElemType.dyn_cast<IntegerType>()) {
+    if (auto resIntType = dyn_cast<IntegerType>(resElemType)) {
       IntegerType lhsIntType =
-          getElementTypeOrSelf(op->getOperand(0)).cast<IntegerType>();
+          cast<IntegerType>(getElementTypeOrSelf(op->getOperand(0)));
       IntegerType rhsIntType =
-          getElementTypeOrSelf(op->getOperand(1)).cast<IntegerType>();
+          cast<IntegerType>(getElementTypeOrSelf(op->getOperand(1)));
       if (lhsIntType != rhsIntType)
         return op->emitOpError(
             "requires the same element type for all operands");
diff --git a/mlir/include/mlir/IR/Location.h b/mlir/include/mlir/IR/Location.h
index d4268e804f4f7a..aa8314f38cdfac 100644
--- a/mlir/include/mlir/IR/Location.h
+++ b/mlir/include/mlir/IR/Location.h
@@ -154,7 +154,7 @@ class FusedLocWith : public FusedLoc {
   /// Support llvm style casting.
   static bool classof(Attribute attr) {
     auto fusedLoc = llvm::dyn_cast<FusedLoc>(attr);
-    return fusedLoc && fusedLoc.getMetadata().isa_and_nonnull<MetadataT>();
+    return fusedLoc && mlir::isa_and_nonnull<MetadataT>(fusedLoc.getMetadata());
   }
 };
 
diff --git a/mlir/lib/CAPI/Dialect/LLVM.cpp b/mlir/lib/CAPI/Dialect/LLVM.cpp
index 4669c40f843d94..21c66f38a8af03 100644
--- a/mlir/lib/CAPI/Dialect/LLVM.cpp
+++ b/mlir/lib/CAPI/Dialect/LLVM.cpp
@@ -135,7 +135,7 @@ MlirAttribute mlirLLVMDIExpressionAttrGet(MlirContext ctx, intptr_t nOperations,
       unwrap(ctx),
       llvm::map_to_vector(
           unwrapList(nOperations, operations, attrStorage),
-          [](Attribute a) { return a.cast<DIExpressionElemAttr>(); })));
+          [](Attribute a) { return cast<DIExpressionElemAttr>(a); })));
 }
 
 MlirAttribute mlirLLVMDINullTypeAttrGet(MlirContext ctx) {
@@ -165,7 +165,7 @@ MlirAttribute mlirLLVMDICompositeTypeAttrGet(
       cast<DIScopeAttr>(unwrap(scope)), cast<DITypeAttr>(unwrap(baseType)),
       DIFlags(flags), sizeInBits, alignInBits,
       llvm::map_to_vector(unwrapList(nElements, elements, elementsStorage),
-                          [](Attribute a) { return a.cast<DINodeAttr>(); })));
+                          [](Attribute a) { return cast<DINodeAttr>(a); })));
 }
 
 MlirAttribute
@@ -259,7 +259,7 @@ MlirAttribute mlirLLVMDISubroutineTypeAttrGet(MlirContext ctx,
   return wrap(DISubroutineTypeAttr::get(
       unwrap(ctx), callingConvention,
       llvm::map_to_vector(unwrapList(nTypes, types, attrStorage),
-                          [](Attribute a) { return a.cast<DITypeAttr>(); })));
+                          [](Attribute a) { return cast<DITypeAttr>(a); })));
 }
 
 MlirAttribute mlirLLVMDISubprogramAttrGet(
diff --git a/mlir/lib/CAPI/IR/BuiltinTypes.cpp b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
index e1a5d82587cf9e..c94c070144a7e9 100644
--- a/mlir/lib/CAPI/IR/BuiltinTypes.cpp
+++ b/mlir/lib/CAPI/IR/BuiltinTypes.cpp
@@ -311,11 +311,11 @@ MlirType mlirVectorTypeGetScalableChecked(MlirLocation loc, intptr_t rank,
 }
 
 bool mlirVectorTypeIsScalable(MlirType type) {
-  return unwrap(type).cast<VectorType>().isScalable();
+  return cast<VectorType>(unwrap(type)).isScalable();
 }
 
 bool mlirVectorTypeIsDimScalable(MlirType type, intptr_t dim) {
-  return unwrap(type).cast<VectorType>().getScalableDims()[dim];
+  return cast<VectorType>(unwrap(type)).getScalableDims()[dim];
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index 7e073bae75c0c9..033e66c6118f30 100644
--- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
+++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -371,7 +371,7 @@ static void wmmaPushInputOperand(ConversionPatternRewriter &rewriter,
                                  bool isUnsigned, Value llvmInput,
                                  SmallVector<Value, 4> &operands) {
   Type inputType = llvmInput.getType();
-  auto vectorType = inputType.dyn_cast<VectorType>();
+  auto vectorType = dyn_cast<VectorType>(inputType);
   Type elemType = vectorType.getElementType();
 
   if (elemType.isBF16())
@@ -414,7 +414,7 @@ static void wmmaPushOutputOperand(ConversionPatternRewriter &rewriter,
                                   Value output, int32_t subwordOffset,
                                   bool clamp, SmallVector<Value, 4> &operands) {
   Type inputType = output.getType();
-  auto vectorType = inputType.dyn_cast<VectorType>();
+  auto vectorType = dyn_cast<VectorType>(inputType);
   Type elemType = vectorType.getElementType();
   if (elemType.isBF16())
     output = rewriter.create<LLVM::BitcastOp>(
@@ -569,9 +569,8 @@ static std::optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma,
 /// on the architecture you are compiling for.
 static std::optional<StringRef> wmmaOpToIntrinsic(WMMAOp wmma,
                                                   Chipset chipset) {
-
-  auto sourceVectorType = wmma.getSourceA().getType().dyn_cast<VectorType>();
-  auto destVectorType = wmma.getDestC().getType().dyn_cast<VectorType>();
+  auto sourceVectorType = dyn_cast<VectorType>(wmma.getSourceA().getType());
+  auto destVectorType = dyn_cast<VectorType>(wmma.getDestC().getType());
   auto elemSourceType = sourceVectorType.getElementType();
   auto elemDestType = destVectorType.getElementType();
 
@@ -727,7 +726,7 @@ LogicalResult ExtPackedFp8OpLowering::matchAndRewrite(
   Type f32 = getTypeConverter()->convertType(op.getResult().getType());
 
   Value source = adaptor.getSource();
-  auto sourceVecType = op.getSource().getType().dyn_cast<VectorType>();
+  auto sourceVecType = dyn_cast<VectorType>(op.getSource().getType());
   Type sourceElemType = getElementTypeOrSelf(op.getSource());
   // Extend to a v4i8
   if (!sourceVecType || sourceVecType.getNumElements() < 4) {
diff --git a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
index 0113a3df0b8e3d..3d3ff001c541b5 100644
--- a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
+++ b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
@@ -65,7 +65,7 @@ static Value castF32To(Type elementType, Value f32, Location loc,
 
 LogicalResult ExtFOnFloat8RewritePattern::match(arith::ExtFOp op) const {
   Type inType = op.getIn().getType();
-  if (auto inVecType = inType.dyn_cast<VectorType>()) {
+  if (auto inVecType = dyn_cast<VectorType>(inType)) {
     if (inVecType.isScalable())
       return failure();
     if (inVecType.getShape().size() > 1)
@@ -81,13 +81,13 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
   Location loc = op.getLoc();
   Value in = op.getIn();
   Type outElemType = getElementTypeOrSelf(op.getOut().getType());
-  if (!in.getType().isa<VectorType>()) {
+  if (!isa<VectorType>(in.getType())) {
     Value asFloat = rewriter.create<amdgpu::ExtPackedFp8Op>(
         loc, rewriter.getF32Type(), in, 0);
     Value result = castF32To(outElemType, asFloat, loc, rewriter);
     return rewriter.replaceOp(op, result);
   }
-  VectorType inType = in.getType().cast<VectorType>();
+  VectorType inType = cast<VectorType>(in.getType());
   int64_t numElements = inType.getNumElements();
   Value zero = rewriter.create<arith::ConstantOp>(
       loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
@@ -179,7 +179,7 @@ LogicalResult TruncFToFloat8RewritePattern::match(arith::TruncFOp op) const {
   if (op.getRoundingmodeAttr())
     return failure();
   Type outType = op.getOut().getType();
-  if (auto outVecType = outType.dyn_cast<VectorType>()) {
+  if (auto outVecType = dyn_cast<VectorType>(outType)) {
     if (outVecType.isScalable())
       return failure();
     if (outVecType.getShape().size() > 1)
@@ -202,7 +202,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
   if (saturateFP8)
     in = clampInput(rewriter, loc, outElemType, in);
   VectorType truncResType = VectorType::get(4, outElemType);
-  if (!in.getType().isa<VectorType>()) {
+  if (!isa<VectorType>(in.getType())) {
     Value asFloat = castToF32(in, loc, rewriter);
     Value asF8s = rewriter.create<amdgpu::PackedTrunc2xFp8Op>(
         loc, truncResType, asFloat, /*sourceB=*/nullptr, 0,
@@ -210,7 +210,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
     Value result = rewriter.create<vector::ExtractOp>(loc, asF8s, 0);
     return rewriter.replaceOp(op, result);
   }
-  VectorType outType = op.getOut().getType().cast<VectorType>();
+  VectorType outType = cast<VectorType>(op.getOut().getType());
   int64_t numElements = outType.getNumElements();
   Value zero = rewriter.create<arith::ConstantOp>(
       loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
index 993c09b03c0fde..36e10372e4bc5b 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -214,7 +214,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
        llvm::enumerate(gpuFuncOp.getArgumentTypes())) {
     auto remapping = signatureConversion.getInputMapping(idx);
     NamedAttrList argAttr =
-        argAttrs ? argAttrs[idx].cast<DictionaryAttr>() : NamedAttrList();
+        argAttrs ? cast<DictionaryAttr>(argAttrs[idx]) : NamedAttrList();
     auto copyAttribute = [&](StringRef attrName) {
       Attribute attr = argAttr.erase(attrName);
       if (!attr)
@@ -234,9 +234,8 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
         return;
       }
       for (size_t i = 0, e = remapping->size; i < e; ++i) {
-        if (llvmFuncOp.getArgument(remapping->inputNo + i)
-                .getType()
-                .isa<LLVM::LLVMPointerType>()) {
+        if (isa<LLVM::LLVMPointerType>(
+                llvmFuncOp.getArgument(remapping->inputNo + i).getType())) {
           llvmFuncOp.setArgAttr(remapping->inputNo + i, attrName, attr);
         }
       }
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index 78d4e806246872..3a4fc7d8063f40 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -668,7 +668,7 @@ static int32_t getCuSparseLtDataTypeFrom(Type type) {
 static int32_t getCuSparseDataTypeFrom(Type type) {
   if (llvm::isa<ComplexType>(type)) {
     // get the element type
-    auto elementType = type.cast<ComplexType>().getElementType();
+    auto elementType = cast<ComplexType>(type).getElementType();
     if (elementType.isBF16())
       return 15; // CUDA_C_16BF
     if (elementType.isF16())
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 9b5d19ebd783a9..11d29754aa760e 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -1579,7 +1579,7 @@ struct NVGPUWarpgroupMmaStoreOpLowering
     if (offset)
       ti = makeAdd(ti, makeConst(offset));
 
-    auto structType = matrixD.getType().cast<LLVM::LLVMStructType>();
+    auto structType = cast<LLVM::LLVMStructType>(matrixD.getType());
 
     // Number of 32-bit registers owns per thread
     constexpr unsigned numAdjacentRegisters = 2;
@@ -1606,9 +1606,9 @@ struct NVGPUWarpgroupMmaStoreOpLowering
     int offset = 0;
     ImplicitLocOpBuilder b(op->getLoc(), rewriter);
     Value matriDValue = adaptor.getMatrixD();
-    auto stype = matriDValue.getType().cast<LLVM::LLVMStructType>();
+    auto stype = cast<LLVM::LLVMStructType>(matriDValue.getType());
     for (auto [idx, matrixD] : llvm::enumerate(stype.getBody())) {
-      auto structType = matrixD.cast<LLVM::LLVMStructType>();
+      auto structType = cast<LLVM::LLVMStructType>(matrixD);
       Value innerStructValue = b.create<LLVM::ExtractValueOp>(matriDValue, idx);
       storeFragmentedMatrix(b, innerStructValue, op.getDstMemref(), offset);
       offset += structType.getBody().size();
@@ -1626,13 +1626,9 @@ struct NVGPUWarpgroupMmaInitAccumulatorOpLowering
   matchAndRewrite(nvgpu::WarpgroupMmaInitAccumulatorOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     ImplicitLocOpBuilder b(op->getLoc(), rewriter);
-    LLVM::LLVMStructType packStructType =
-        getTypeConverter()
-            ->convertType(op.getMatrixC().getType())
-            .cast<LLVM::LLVMStructType>();
-    Type elemType = packStructType.getBody()
-                        .front()
-                        .cast<LLVM::LLVMStructType>()
+    LLVM::LLVMStructType packStructType = cast<LLVM::LLVMStructType>(
+        getTypeConverter()->convertType(op.getMatrixC().getType()));
+    Type elemType = cast<LLVM::LLVMStructType>(packStructType.getBody().front())
                         .getBody()
                         .front();
     Value zero = b.create<LLVM::ConstantOp>(elemType, b.getZeroAttr(elemType));
@@ -1640,7 +1636,7 @@ struct NVGPUWarpgroupMmaInitAccumulatorOpLowering
     SmallVector<Value> innerStructs;
     // Unpack the structs and set all values to zero
     for (auto [idx, s] : llvm::enumerate(packStructType.getBody())) {
-      auto structType = s.cast<LLVM::LLVMStructType>();
+      auto structType = cast<LLVM::LLVMStructType>(s);
       Value structValue = b.create<LLVM::ExtractValueOp>(packStruct, idx);
       for (unsigned i = 0; i < structType.getBody().size(); ++i) {
         structValue = b.create<LLVM::InsertValueOp>(
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index ef8d59c9b26082..b6b85cab5a3821 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -618,7 +618,7 @@ static Value expandRank(PatternRewriter &rewriter, Location loc, Value tensor,
 static SmallVector<Value> expandInputRanks(PatternRewriter &rewriter,
                                            Location loc, Operation *operation) {
   auto rank =
-      operation->getResultTypes().front().cast<RankedTensorType>().getRank();
+      cast<RankedTensorType>(operation->getResultTypes().front()).getRank();
   return llvm::map_to_vector(operation->getOperands(), [&](Value operand) {
     return expandRank(rewriter, loc, operand, rank);
   });
@@ -680,7 +680,7 @@ computeTargetSize(PatternRewriter &rewriter, Location loc, IndexPool &indexPool,
   // dimension, that is the target size. An occurrence of an additional static
   // dimension greater than 1 with a different value is undefined behavior.
   for (auto operand : operands) {
-    auto size = operand.getType().cast<RankedTensorType>().getDimSize(dim);
+    auto size = cast<RankedTensorType>(operand.getType()).getDimSize(dim);
     if (!ShapedType::isDynamic(size) && size > 1)
       return {rewriter.getIndexAttr(size), operand};
   }
@@ -688,7 +688,7 @@ computeTargetSize(PatternRewriter &rewriter, Location loc, IndexPool &indexPool,
   // Filter operands with dynamic dimension
   auto operandsWithDynamicDim =
       llvm::to_vector(llvm::make_filter_range(operands, [&](Value operand) {
-        return operand.getType().cast<RankedTensorType>().isDynamicDim(dim);
+        return cast<RankedTensorType>(operand.getType()).isDynamicDim(dim);
       }));
 
   // If no operand has a dynamic dimension, it means all sizes were 1
@@ -718,7 +718,7 @@ static std::pair<SmallVector<OpFoldResult>, SmallVector<Value>>
 computeTargetShape(PatternRewriter &rewriter, Location loc,
                    IndexPool &indexPool, ValueRange operands) {
   assert(!operands.empty());
-  auto rank = operands.front().getType().cast<RankedTensorType>().getRank();
+  auto rank = cast<RankedTensorType>(operands.front().getType()).getRank();
   SmallVector<OpFoldResult> targetShape;
   SmallVector<Value> masterOperands;
   for (auto dim : llvm::seq<int64_t>(0, rank)) {
@@ -735,7 +735,7 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
                                        int64_t dim, OpFoldResult targetSize,
                                        Value masterOperand) {
   // Nothing to do if this is a static dimension
-  auto rankedTensorType = operand.getType().cast<RankedTensorType>();
+  auto rankedTensorType = cast<RankedTensorType>(operand.getType());
   if (!rankedTensorType.isDynamicDim(dim))
     return operand;
 
@@ -817,7 +817,7 @@ static Value broadcastDynamicDimensions(PatternRewriter &rewriter, Location loc,
                                         IndexPool &indexPool, Value operand,
                                         ArrayRef<OpFoldResult> targetShape,
                                         ArrayRef<Value> masterOperands) {
-  int64_t rank = operand.getType().cast<RankedTensorType>().getRank();
+  int64_t rank = cast<RankedTensorType>(operand.getType()).getRank();
   assert((int64_t)targetShape.size() == rank);
   assert((int64_t)masterOperands.size() == rank);
   for (auto index : llvm::seq<int64_t>(0, rank))
@@ -848,8 +848,7 @@ emitElementwiseComputation(PatternRewriter &rewriter, Location loc,
                            Operation *operation, ValueRange operands,
                            ArrayRef<OpFoldResult> targetShape) {
   // Generate output tensor
-  auto resultType =
-      operation->getResultTypes().front().cast<RankedTensorType>();
+  auto resultType = cast<RankedTensorType>(operation->getResultTypes().front());
   Value outputTensor = rewriter.create<tensor::EmptyOp>(
       loc, targetShape, resultType.getElementType());
 
@@ -2274,8 ...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/89356


More information about the Mlir-commits mailing list