[Mlir-commits] [mlir] 5c8ce6d - [mlir] Move casting method calls to function calls

Tres Popp llvmlistbot at llvm.org
Fri May 12 02:52:38 PDT 2023


Author: Tres Popp
Date: 2023-05-12T11:17:27+02:00
New Revision: 5c8ce6d5761ed6a9a39ef5a77aa45d8b6095e0f5

URL: https://github.com/llvm/llvm-project/commit/5c8ce6d5761ed6a9a39ef5a77aa45d8b6095e0f5
DIFF: https://github.com/llvm/llvm-project/commit/5c8ce6d5761ed6a9a39ef5a77aa45d8b6095e0f5.diff

LOG: [mlir] Move casting method calls to function calls

The MLIR classes Type/Attribute/Operation/Op/Value support
cast/dyn_cast/isa/dyn_cast_or_null functionality through llvm's doCast
mechanism, in addition to defining member methods with the same names.
This change continues the migration from the member-method calls to the
corresponding free-function calls, which have been decided on as the
more consistent style.

This commit attempts to update all occurrences of the casts in .td
files, although it is likely that a couple were missed.
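
For illustration, the general shape of the change at the C++ level is
roughly the sketch below (explanatory only, not part of the patch; the
function name and header choices are placeholder assumptions):

    // Illustrative sketch only -- not code from this commit.
    #include "mlir/IR/BuiltinTypes.h"   // brings in mlir::ShapedType
    #include "mlir/IR/Value.h"          // mlir::Value

    static bool exampleHasRank(mlir::Value v) {
      mlir::Type t = v.getType();
      // Deprecated member-method style:
      //   return t.isa<mlir::ShapedType>() &&
      //          t.cast<mlir::ShapedType>().hasRank();
      // Preferred free-function style used throughout this patch:
      if (auto shaped = ::llvm::dyn_cast<mlir::ShapedType>(t))
        return shaped.hasRank();
      return false;
    }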

Context:
- https://mlir.llvm.org/deprecation/ at "Use the free function variants for dyn_cast/cast/isa/…"
- Original discussion at https://discourse.llvm.org/t/preferred-casting-style-going-forward/68443

Implementation:
Unfortunately, this was not automated; it was handled by mindlessly
jumping to the next occurrence of each pattern, selecting the piece of
code to be moved into the function call, and running a vim macro, over
a span of around 4 hours.

Differential Revision: https://reviews.llvm.org/D150199

Added: 
    

Modified: 
    mlir/examples/toy/Ch3/mlir/ToyCombine.td
    mlir/examples/toy/Ch4/mlir/ToyCombine.td
    mlir/examples/toy/Ch5/mlir/ToyCombine.td
    mlir/examples/toy/Ch6/mlir/ToyCombine.td
    mlir/examples/toy/Ch7/include/toy/Ops.td
    mlir/examples/toy/Ch7/mlir/ToyCombine.td
    mlir/include/mlir/Dialect/AMX/AMX.td
    mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
    mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
    mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td
    mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
    mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
    mlir/include/mlir/Dialect/Complex/IR/ComplexAttributes.td
    mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
    mlir/include/mlir/Dialect/DLTI/DLTIBase.td
    mlir/include/mlir/Dialect/Func/IR/FuncOps.td
    mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
    mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
    mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
    mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
    mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
    mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
    mlir/include/mlir/Dialect/PDL/IR/PDLTypes.td
    mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
    mlir/include/mlir/Dialect/Quant/QuantOpsBase.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
    mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
    mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
    mlir/include/mlir/Dialect/Transform/IR/TransformTypes.td
    mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
    mlir/include/mlir/Dialect/X86Vector/X86Vector.td
    mlir/include/mlir/IR/AttrTypeBase.td
    mlir/include/mlir/IR/BuiltinAttributeInterfaces.td
    mlir/include/mlir/IR/BuiltinAttributes.td
    mlir/include/mlir/IR/BuiltinDialectBytecode.td
    mlir/include/mlir/IR/BuiltinTypes.td
    mlir/include/mlir/IR/EnumAttr.td
    mlir/include/mlir/IR/OpBase.td
    mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
    mlir/include/mlir/Interfaces/VectorInterfaces.td
    mlir/lib/Dialect/Shape/IR/ShapeCanonicalization.td
    mlir/test/lib/Dialect/Test/TestOps.td
    mlir/test/mlir-pdll/Parser/include_td.pdll
    mlir/test/mlir-tblgen/interfaces-as-constraints.td
    mlir/test/mlir-tblgen/op-attribute.td
    mlir/test/mlir-tblgen/predicate.td

Removed: 
    


################################################################################
diff  --git a/mlir/examples/toy/Ch3/mlir/ToyCombine.td b/mlir/examples/toy/Ch3/mlir/ToyCombine.td
index 15477e8969cae..11d783150ebe1 100644
--- a/mlir/examples/toy/Ch3/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch3/mlir/ToyCombine.td
@@ -42,7 +42,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
 
 // Reshape(Constant(x)) = x'
 def ReshapeConstant :
-  NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
+  NativeCodeCall<"$0.reshape(::llvm::cast<ShapedType>($1.getType()))">;
 def FoldConstantReshapeOptPattern : Pat<
   (ReshapeOp:$res (ConstantOp $arg)),
   (ConstantOp (ReshapeConstant $arg, $res))>;

diff  --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.td b/mlir/examples/toy/Ch4/mlir/ToyCombine.td
index 15477e8969cae..11d783150ebe1 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.td
@@ -42,7 +42,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
 
 // Reshape(Constant(x)) = x'
 def ReshapeConstant :
-  NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
+  NativeCodeCall<"$0.reshape(::llvm::cast<ShapedType>($1.getType()))">;
 def FoldConstantReshapeOptPattern : Pat<
   (ReshapeOp:$res (ConstantOp $arg)),
   (ConstantOp (ReshapeConstant $arg, $res))>;

diff  --git a/mlir/examples/toy/Ch5/mlir/ToyCombine.td b/mlir/examples/toy/Ch5/mlir/ToyCombine.td
index 15477e8969cae..11d783150ebe1 100644
--- a/mlir/examples/toy/Ch5/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch5/mlir/ToyCombine.td
@@ -42,7 +42,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
 
 // Reshape(Constant(x)) = x'
 def ReshapeConstant :
-  NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
+  NativeCodeCall<"$0.reshape(::llvm::cast<ShapedType>($1.getType()))">;
 def FoldConstantReshapeOptPattern : Pat<
   (ReshapeOp:$res (ConstantOp $arg)),
   (ConstantOp (ReshapeConstant $arg, $res))>;

diff  --git a/mlir/examples/toy/Ch6/mlir/ToyCombine.td b/mlir/examples/toy/Ch6/mlir/ToyCombine.td
index 15477e8969cae..11d783150ebe1 100644
--- a/mlir/examples/toy/Ch6/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch6/mlir/ToyCombine.td
@@ -42,7 +42,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
 
 // Reshape(Constant(x)) = x'
 def ReshapeConstant :
-  NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
+  NativeCodeCall<"$0.reshape(::llvm::cast<ShapedType>($1.getType()))">;
 def FoldConstantReshapeOptPattern : Pat<
   (ReshapeOp:$res (ConstantOp $arg)),
   (ConstantOp (ReshapeConstant $arg, $res))>;

diff  --git a/mlir/examples/toy/Ch7/include/toy/Ops.td b/mlir/examples/toy/Ch7/include/toy/Ops.td
index ba2830a5e1c47..422a2eb03a44b 100644
--- a/mlir/examples/toy/Ch7/include/toy/Ops.td
+++ b/mlir/examples/toy/Ch7/include/toy/Ops.td
@@ -48,7 +48,7 @@ class Toy_Op<string mnemonic, list<Trait> traits = []> :
 // using StructType in a similar way to Tensor or MemRef. We use `DialectType`
 // to demarcate the StructType as belonging to the Toy dialect.
 def Toy_StructType :
-    DialectType<Toy_Dialect, CPred<"$_self.isa<StructType>()">,
+    DialectType<Toy_Dialect, CPred<"::llvm::isa<StructType>($_self)">,
                 "Toy struct type">;
 
 // Provide a definition of the types that are used within the Toy dialect.

diff  --git a/mlir/examples/toy/Ch7/mlir/ToyCombine.td b/mlir/examples/toy/Ch7/mlir/ToyCombine.td
index 15477e8969cae..11d783150ebe1 100644
--- a/mlir/examples/toy/Ch7/mlir/ToyCombine.td
+++ b/mlir/examples/toy/Ch7/mlir/ToyCombine.td
@@ -42,7 +42,7 @@ def ReshapeReshapeOptPattern : Pat<(ReshapeOp(ReshapeOp $arg)),
 
 // Reshape(Constant(x)) = x'
 def ReshapeConstant :
-  NativeCodeCall<"$0.reshape(($1.getType()).cast<ShapedType>())">;
+  NativeCodeCall<"$0.reshape(::llvm::cast<ShapedType>($1.getType()))">;
 def FoldConstantReshapeOptPattern : Pat<
   (ReshapeOp:$res (ConstantOp $arg)),
   (ConstantOp (ReshapeConstant $arg, $res))>;

diff  --git a/mlir/include/mlir/Dialect/AMX/AMX.td b/mlir/include/mlir/Dialect/AMX/AMX.td
index 094f02fef85c3..f6144aca6e0ff 100644
--- a/mlir/include/mlir/Dialect/AMX/AMX.td
+++ b/mlir/include/mlir/Dialect/AMX/AMX.td
@@ -97,7 +97,7 @@ def TileZeroOp : AMX_Op<"tile_zero", [Pure]> {
     VectorOfRankAndType<[2], [F32, BF16, I32, I8]>:$res);
   let extraClassDeclaration = [{
     VectorType getVectorType() {
-      return getRes().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRes().getType());
     }
   }];
   let assemblyFormat = "attr-dict `:` type($res)";
@@ -128,10 +128,10 @@ def TileLoadOp : AMX_Op<"tile_load", [Pure]> {
     VectorOfRankAndType<[2], [F32, BF16, I32, I8]>:$res);
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getVectorType() {
-      return getRes().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRes().getType());
     }
   }];
   let assemblyFormat = "$base `[` $indices `]` attr-dict `:` "
@@ -158,10 +158,10 @@ def TileStoreOp : AMX_Op<"tile_store"> {
                    VectorOfRankAndType<[2], [F32, BF16, I32, I8]>:$val);
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getVectorType() {
-      return getVal().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVal().getType());
     }
   }];
   let assemblyFormat = "$base `[` $indices `]` `,` $val attr-dict `:` "
@@ -195,13 +195,13 @@ def TileMulFOp : AMX_Op<"tile_mulf", [
   let results = (outs VectorOfRankAndType<[2], [F32, BF16]>:$res);
   let extraClassDeclaration = [{
     VectorType getLhsVectorType() {
-      return getLhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getLhs().getType());
     }
     VectorType getRhsVectorType() {
-      return getRhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRhs().getType());
     }
     VectorType getVectorType() {
-      return getRes().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRes().getType());
     }
   }];
   let assemblyFormat = "$lhs `,` $rhs `,` $acc attr-dict `:` "
@@ -237,13 +237,13 @@ def TileMulIOp : AMX_Op<"tile_muli", [
   let results = (outs VectorOfRankAndType<[2], [I32, I8]>:$res);
   let extraClassDeclaration = [{
     VectorType getLhsVectorType() {
-      return getLhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getLhs().getType());
     }
     VectorType getRhsVectorType() {
-      return getRhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRhs().getType());
     }
     VectorType getVectorType() {
-      return getRes().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRes().getType());
     }
   }];
   let assemblyFormat = "$lhs (`zext` $isZextLhs^)? `,` $rhs (`zext` $isZextRhs^)? `,` $acc attr-dict `:` "

diff  --git a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
index 528421d14e8d9..c07ab9deca48c 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.td
@@ -40,7 +40,7 @@ def AffineReadOpInterface : OpInterface<"AffineReadOpInterface"> {
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
+        return ::llvm::cast<::mlir::MemRefType>($_op.getMemRef().getType());
       }]
     >,
     InterfaceMethod<
@@ -103,7 +103,7 @@ def AffineWriteOpInterface : OpInterface<"AffineWriteOpInterface"> {
       /*args=*/(ins),
       /*methodBody=*/[{}],
       /*defaultImplementation=*/[{
-        return $_op.getMemRef().getType().template cast<::mlir::MemRefType>();
+        return ::llvm::cast<::mlir::MemRefType>($_op.getMemRef().getType());
       }]
     >,
     InterfaceMethod<

diff  --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index 4bbcf12b47d97..da0cc83684487 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -285,18 +285,18 @@ def AffineForOp : Affine_Op<"for",
 
     /// Returns loop step.
     int64_t getStep() {
-      return (*this)->getAttr(getStepAttrStrName()).cast<IntegerAttr>().getInt();
+      return ::llvm::cast<IntegerAttr>((*this)->getAttr(getStepAttrStrName())).getInt();
     }
 
     /// Returns affine map for the lower bound.
     AffineMap getLowerBoundMap() { return getLowerBoundMapAttr().getValue(); }
     AffineMapAttr getLowerBoundMapAttr() {
-      return (*this)->getAttr(getLowerBoundAttrStrName()).cast<AffineMapAttr>();
+      return ::llvm::cast<AffineMapAttr>((*this)->getAttr(getLowerBoundAttrStrName()));
     }
     /// Returns affine map for the upper bound. The upper bound is exclusive.
     AffineMap getUpperBoundMap() { return getUpperBoundMapAttr().getValue(); }
     AffineMapAttr getUpperBoundMapAttr() {
-      return (*this)->getAttr(getUpperBoundAttrStrName()).cast<AffineMapAttr>();
+      return ::llvm::cast<AffineMapAttr>((*this)->getAttr(getUpperBoundAttrStrName()));
     }
 
     /// Set lower bound. The new bound must have the same number of operands as
@@ -508,7 +508,7 @@ class AffineLoadOpBase<string mnemonic, list<Trait> traits = []> :
 
     /// Returns the affine map used to index the memref for this operation.
     AffineMapAttr getAffineMapAttr() {
-      return (*this)->getAttr(getMapAttrStrName()).cast<AffineMapAttr>();
+      return ::llvm::cast<AffineMapAttr>((*this)->getAttr(getMapAttrStrName()));
     }
 
     static StringRef getMapAttrStrName() { return "map"; }
@@ -824,13 +824,13 @@ def AffinePrefetchOp : Affine_Op<"prefetch",
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getMemref().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemref().getType());
     }
 
     /// Returns the affine map used to index the memref for this operation.
     AffineMap getAffineMap() { return getAffineMapAttr().getValue(); }
     AffineMapAttr getAffineMapAttr() {
-      return (*this)->getAttr(getMapAttrStrName()).cast<AffineMapAttr>();
+      return ::llvm::cast<AffineMapAttr>((*this)->getAttr(getMapAttrStrName()));
     }
 
     /// Impelements the AffineMapAccessInterface.
@@ -875,7 +875,7 @@ class AffineStoreOpBase<string mnemonic, list<Trait> traits = []> :
 
     /// Returns the affine map used to index the memref for this operation.
     AffineMapAttr getAffineMapAttr() {
-      return (*this)->getAttr(getMapAttrStrName()).cast<AffineMapAttr>();
+      return ::llvm::cast<AffineMapAttr>((*this)->getAttr(getMapAttrStrName()));
     }
 
     static StringRef getMapAttrStrName() { return "map"; }
@@ -1012,7 +1012,7 @@ def AffineVectorLoadOp : AffineLoadOpBase<"vector_load"> {
 
   let extraClassDeclaration = extraClassDeclarationBase # [{
     VectorType getVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
 
@@ -1078,7 +1078,7 @@ def AffineVectorStoreOp : AffineStoreOpBase<"vector_store"> {
 
   let extraClassDeclaration = extraClassDeclarationBase # [{
     VectorType getVectorType() {
-      return getValue().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getValue().getType());
     }
   }];
 

diff  --git a/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td b/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td
index d6d95a865bb94..506d434815eb8 100644
--- a/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td
+++ b/mlir/include/mlir/Dialect/ArmNeon/ArmNeon.td
@@ -70,7 +70,7 @@ def SMullOp : ArmNeon_OverloadedOneResultIntrOp<"smull", [
        AllTypesMatch<["a", "b"]>,
        TypesMatchWith<
          "res has same vector shape and element bitwidth scaled by 2 as a",
-         "a", "res", "$_self.cast<VectorType>().scaleElementBitwidth(2)">
+         "a", "res", "::llvm::cast<VectorType>($_self).scaleElementBitwidth(2)">
     ]> {
   let summary = "smull roundscale op";
   let description = [{
@@ -100,7 +100,7 @@ def SdotOp : ArmNeon_OverloadedOperandsWithOneResultIntrOp<"sdot",[1], [
       AllTypesMatch<["a", "res"]>,
       TypesMatchWith<"res has the same number of elements as operand b",
                      "b", "res",
-                     "VectorType::get({$_self.cast<VectorType>().getShape()[0] / 4},"
+                     "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0] / 4},"
                      "IntegerType::get($_self.getContext(), 32))">]> {
   let summary = "sdot op";
   let description = [{
@@ -133,19 +133,19 @@ def Sdot2dOp : ArmNeon_2dOp<"sdot", [
       AllTypesMatch<["a", "res"]>,
       PredOpTrait<
         "operand `a` should be 1-dimensional",
-        CPred<"getA().getType().cast<VectorType>().getShape().size() == 1">
+        CPred<"::llvm::cast<VectorType>(getA().getType()).getShape().size() == 1">
       >,
       PredOpTrait<
         "operand `b` should be 2-dimensional",
-        CPred<"getB().getType().cast<VectorType>().getShape().size() == 2">
+        CPred<"::llvm::cast<VectorType>(getB().getType()).getShape().size() == 2">
       >,
       PredOpTrait<
         "operand `b` should have 4 columns",
-        CPred<"getB().getType().cast<VectorType>().getShape()[1] == 4">
+        CPred<"::llvm::cast<VectorType>(getB().getType()).getShape()[1] == 4">
       >,
       PredOpTrait<
         "operand `b` should have as many rows as the size of operand `a`",
-        CPred<"getB().getType().cast<VectorType>().getShape()[0] == getA().getType().cast<VectorType>().getShape()[0]">
+        CPred<"::llvm::cast<VectorType>(getB().getType()).getShape()[0] == ::llvm::cast<VectorType>(getA().getType()).getShape()[0]">
       >,
       ]
   > {

diff  --git a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
index 9824238e7d933..6ca4925a45234 100644
--- a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
+++ b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
@@ -641,7 +641,7 @@ def Async_RuntimeAwaitAndResumeOp : Async_Op<"runtime.await_and_resume"> {
 def Async_RuntimeStoreOp : Async_Op<"runtime.store",
       [TypesMatchWith<"type of 'value' matches element type of 'storage'",
                      "storage", "value",
-                     "$_self.cast<ValueType>().getValueType()">]> {
+                     "::llvm::cast<ValueType>($_self).getValueType()">]> {
   let summary = "stores the value into the runtime async.value";
   let description = [{
     The `async.runtime.store` operation stores the value into the runtime
@@ -656,7 +656,7 @@ def Async_RuntimeStoreOp : Async_Op<"runtime.store",
 def Async_RuntimeLoadOp : Async_Op<"runtime.load",
       [TypesMatchWith<"type of 'value' matches element type of 'storage'",
                      "storage", "result",
-                     "$_self.cast<ValueType>().getValueType()">]> {
+                     "::llvm::cast<ValueType>($_self).getValueType()">]> {
   let summary = "loads the value from the runtime async.value";
   let description = [{
     The `async.runtime.load` operation loads the value from the runtime

diff  --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
index 4f33425169a51..0674cb09c25f7 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -166,7 +166,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
                       "const ::mlir::bufferization::AnalysisState &":$state),
         /*methodBody=*/"",
         /*defaultImplementation=*/[{
-          return opOperand.get().getType().isa<::mlir::UnrankedTensorType>();
+          return ::llvm::isa<::mlir::UnrankedTensorType>(opOperand.get().getType());
         }]
       >,
       InterfaceMethod<
@@ -233,7 +233,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
         /*methodBody=*/"",
         /*defaultImplementation=*/[{
           // Does not have to be implemented for ops without tensor OpOperands.
-          assert(opOperand.get().getType().isa<::mlir::TensorType>() &&
+          assert(::llvm::isa<::mlir::TensorType>(opOperand.get().getType()) &&
                  "expected OpOperand with tensor type");
           llvm_unreachable("getAliasingOpResults not implemented");
         }]
@@ -304,7 +304,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
                       "const ::mlir::bufferization::AnalysisState &":$state),
         /*methodBody=*/"",
         /*defaultImplementation=*/[{
-          assert(opResult.getType().isa<::mlir::TensorType>() &&
+          assert(::llvm::isa<::mlir::TensorType>(opResult.getType()) &&
                  "expected OpResult with tensor type");
           return ::mlir::bufferization::detail::defaultGetAliasingOpOperands(
               opResult, state);
@@ -394,7 +394,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
                       "const ::mlir::bufferization::AnalysisState &":$state),
         /*methodBody=*/"",
         /*defaultImplementation=*/[{
-          return value.isa<::mlir::OpResult>();
+          return ::llvm::isa<::mlir::OpResult>(value);
         }]
       >,
       InterfaceMethod<

diff  --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index d4a8161fa9506..3c242a36d0e47 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -108,7 +108,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
         const DenseMap<Value, BaseMemRefType> &fixedTypes);
 
     RankedTensorType getType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
 
     // Return true if the size of the tensor is dynamic at `idx`
@@ -322,8 +322,8 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
     /// The result of a to_tensor is always a tensor.
     TensorType getType() {
       Type resultType = getResult().getType();
-      if (resultType.isa<TensorType>())
-        return resultType.cast<TensorType>();
+      if (::llvm::isa<TensorType>(resultType))
+        return ::llvm::cast<TensorType>(resultType);
       return {};
     }
 
@@ -342,7 +342,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
     FailureOr<BaseMemRefType> getBufferType(
         Value value, const BufferizationOptions &options,
         const DenseMap<Value, BaseMemRefType> &fixedTypes) {
-      return getMemref().getType().cast<BaseMemRefType>();
+      return ::llvm::cast<BaseMemRefType>(getMemref().getType());
     }
   }];
 

diff  --git a/mlir/include/mlir/Dialect/Complex/IR/ComplexAttributes.td b/mlir/include/mlir/Dialect/Complex/IR/ComplexAttributes.td
index 886ae3a3f721f..52fd824f65e74 100644
--- a/mlir/include/mlir/Dialect/Complex/IR/ComplexAttributes.td
+++ b/mlir/include/mlir/Dialect/Complex/IR/ComplexAttributes.td
@@ -44,7 +44,7 @@ def Complex_NumberAttr : Complex_Attr<"Number", "number",
     AttrBuilderWithInferredContext<(ins "mlir::ComplexType":$type,
                                         "double":$real,
                                         "double":$imag), [{
-      auto elementType = type.getElementType().cast<FloatType>();
+      auto elementType = ::llvm::cast<FloatType>(type.getElementType());
       APFloat realFloat(real);
       bool unused;
       realFloat.convert(elementType.getFloatSemantics(),

diff  --git a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
index 02dceaa2bd473..7116bed2763f6 100644
--- a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
+++ b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
@@ -44,7 +44,7 @@ class ComplexUnaryOp<string mnemonic, list<Trait> traits = []> :
 def AbsOp : ComplexUnaryOp<"abs",
     [TypesMatchWith<"complex element type matches result type",
                     "complex", "result",
-                    "$_self.cast<ComplexType>().getElementType()">]> {
+                    "::llvm::cast<ComplexType>($_self).getElementType()">]> {
   let summary = "computes absolute value of a complex number";
   let description = [{
     The `abs` op takes a single complex number and computes its absolute value.
@@ -158,10 +158,10 @@ def CreateOp : Complex_Op<"create",
      AllTypesMatch<["real", "imaginary"]>,
      TypesMatchWith<"complex element type matches real operand type",
                     "complex", "real",
-                    "$_self.cast<ComplexType>().getElementType()">,
+                    "::llvm::cast<ComplexType>($_self).getElementType()">,
      TypesMatchWith<"complex element type matches imaginary operand type",
                     "complex", "imaginary",
-                    "$_self.cast<ComplexType>().getElementType()">]> {
+                    "::llvm::cast<ComplexType>($_self).getElementType()">]> {
 
   let summary = "complex number creation operation";
   let description = [{
@@ -276,7 +276,7 @@ def Expm1Op : ComplexUnaryOp<"expm1", [SameOperandsAndResultType]> {
 def ImOp : ComplexUnaryOp<"im",
     [TypesMatchWith<"complex element type matches result type",
                     "complex", "imaginary",
-                    "$_self.cast<ComplexType>().getElementType()">]> {
+                    "::llvm::cast<ComplexType>($_self).getElementType()">]> {
   let summary = "extracts the imaginary part of a complex number";
   let description = [{
     The `im` op takes a single complex number and extracts the imaginary part.
@@ -422,7 +422,7 @@ def PowOp : ComplexArithmeticOp<"pow"> {
 def ReOp : ComplexUnaryOp<"re",
     [TypesMatchWith<"complex element type matches result type",
                     "complex", "real",
-                    "$_self.cast<ComplexType>().getElementType()">]> {
+                    "::llvm::cast<ComplexType>($_self).getElementType()">]> {
   let summary = "extracts the real part of a complex number";
   let description = [{
     The `re` op takes a single complex number and extracts the real part.
@@ -602,7 +602,7 @@ def ConjOp : ComplexUnaryOp<"conj", [SameOperandsAndResultType]> {
 def AngleOp : ComplexUnaryOp<"angle",
                            [TypesMatchWith<"complex element type matches result type",
                                            "complex", "result",
-                                           "$_self.cast<ComplexType>().getElementType()">]> {
+                                           "::llvm::cast<ComplexType>($_self).getElementType()">]> {
   let summary = "computes argument value of a complex number";
   let description = [{
     The `angle` op takes a single complex number and computes its argument value with a branch cut along the negative real axis.

diff  --git a/mlir/include/mlir/Dialect/DLTI/DLTIBase.td b/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
index 38b3b0cc8f671..339a7184a125f 100644
--- a/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
+++ b/mlir/include/mlir/Dialect/DLTI/DLTIBase.td
@@ -50,7 +50,7 @@ def DLTI_Dialect : Dialect {
 
 def DLTI_DataLayoutEntryAttr : DialectAttr<
     DLTI_Dialect,
-    CPred<"$_self.isa<::mlir::DataLayoutEntryAttr>()">,
+    CPred<"::llvm::isa<::mlir::DataLayoutEntryAttr>($_self)">,
     "Target data layout entry"> {
   let storageType = "::mlir::DataLayoutEntryAttr";
   let returnType = "::mlir::DataLayoutEntryAttr";
@@ -59,7 +59,7 @@ def DLTI_DataLayoutEntryAttr : DialectAttr<
 
 def DLTI_DataLayoutSpecAttr : DialectAttr<
     DLTI_Dialect,
-    CPred<"$_self.isa<::mlir::DataLayoutSpecAttr>()">,
+    CPred<"::llvm::isa<::mlir::DataLayoutSpecAttr>($_self)">,
     "Target data layout specification"> {
   let storageType = "::mlir::DataLayoutSpecAttr";
   let returnType = "::mlir::DataLayoutSpecAttr";

diff  --git a/mlir/include/mlir/Dialect/Func/IR/FuncOps.td b/mlir/include/mlir/Dialect/Func/IR/FuncOps.td
index fb206f1be8175..69dc8d32c3c66 100644
--- a/mlir/include/mlir/Dialect/Func/IR/FuncOps.td
+++ b/mlir/include/mlir/Dialect/Func/IR/FuncOps.td
@@ -111,10 +111,10 @@ def CallIndirectOp : Func_Op<"call_indirect", [
       CallOpInterface,
       TypesMatchWith<"callee input types match argument types",
                      "callee", "callee_operands",
-                     "$_self.cast<FunctionType>().getInputs()">,
+                     "::llvm::cast<FunctionType>($_self).getInputs()">,
       TypesMatchWith<"callee result types match result types",
                      "callee", "results",
-                     "$_self.cast<FunctionType>().getResults()">
+                     "::llvm::cast<FunctionType>($_self).getResults()">
     ]> {
   let summary = "indirect call operation";
   let description = [{
@@ -141,7 +141,7 @@ def CallIndirectOp : Func_Op<"call_indirect", [
     OpBuilder<(ins "Value":$callee, CArg<"ValueRange", "{}">:$operands), [{
       $_state.operands.push_back(callee);
       $_state.addOperands(operands);
-      $_state.addTypes(callee.getType().cast<FunctionType>().getResults());
+      $_state.addTypes(::llvm::cast<FunctionType>(callee.getType()).getResults());
     }]>];
 
   let extraClassDeclaration = [{

diff  --git a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
index fffe4a45931ec..50fcd197bb9ee 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
@@ -92,11 +92,11 @@ def GPU_AddressSpaceAttr :
 //===----------------------------------------------------------------------===//
 
 def GPU_AsyncToken : DialectType<
-  GPU_Dialect, CPred<"$_self.isa<::mlir::gpu::AsyncTokenType>()">, "async token type">,
+  GPU_Dialect, CPred<"::llvm::isa<::mlir::gpu::AsyncTokenType>($_self)">, "async token type">,
              BuildableType<"mlir::gpu::AsyncTokenType::get($_builder.getContext())">;
 
 // Predicat to check if type is gpu::MMAMatrixType.
-def IsMMAMatrixTypePred : CPred<"$_self.isa<::mlir::gpu::MMAMatrixType>()">;
+def IsMMAMatrixTypePred : CPred<"::llvm::isa<::mlir::gpu::MMAMatrixType>($_self)">;
 
 def GPU_MMAMatrix : DialectType<
   GPU_Dialect, IsMMAMatrixTypePred, "MMAMatrix type">;
@@ -106,7 +106,7 @@ def GPU_MMAMemRef : MemRefOf<[I8, I32, F16, F32, VectorOfRankAndType<[1], [I8, I
 
 class MMAMatrixOf<list<Type> allowedTypes> :
   ContainerType<AnyTypeOf<allowedTypes>, IsMMAMatrixTypePred,
-  "$_self.cast<::mlir::gpu::MMAMatrixType>().getElementType()",
+  "::llvm::cast<::mlir::gpu::MMAMatrixType>($_self).getElementType()",
   "gpu.mma_matrix", "::mlir::gpu::MMAMatrixType">;
 
 //===----------------------------------------------------------------------===//

diff  --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 07be3c3ffcf6a..f682346d8108c 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -1152,7 +1152,7 @@ def GPU_AllocOp : GPU_Op<"alloc", [
                  Optional<GPU_AsyncToken>:$asyncToken);
 
   let extraClassDeclaration = [{
-    MemRefType getType() { return getMemref().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getMemref().getType()); }
   }];
 
   let assemblyFormat = [{
@@ -1415,7 +1415,7 @@ def GPU_SubgroupMmaConstantMatrixOp : GPU_Op<"subgroup_mma_constant_matrix",
     [Pure,
      TypesMatchWith<"value type matches element type of mma_matrix",
                     "res", "value",
-                    "$_self.cast<gpu::MMAMatrixType>().getElementType()">]>{
+                    "::llvm::cast<gpu::MMAMatrixType>($_self).getElementType()">]>{
 
   let summary = "GPU warp synchronous constant matrix";
 
@@ -1447,7 +1447,7 @@ def GPU_SubgroupMmaConstantMatrixOp : GPU_Op<"subgroup_mma_constant_matrix",
 
   let extraClassDeclaration = [{
     gpu::MMAMatrixType getType() {
-      return getRes().getType().cast<gpu::MMAMatrixType>();
+      return ::llvm::cast<gpu::MMAMatrixType>(getRes().getType());
     }
   }];
 
@@ -1524,7 +1524,7 @@ def GPU_SubgroupMmaElementwiseOp : GPU_Op<"subgroup_mma_elementwise",
 
   let extraClassDeclaration = [{
     gpu::MMAMatrixType getType() {
-      return getRes().getType().cast<gpu::MMAMatrixType>();
+      return ::llvm::cast<gpu::MMAMatrixType>(getRes().getType());
     }
   }];
 

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index 95f3aa8387b31..8d8494924e2a9 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -272,7 +272,7 @@ def CConvEnum : LLVM_CEnumAttr<
 
 def CConv : DialectAttr<
     LLVM_Dialect,
-    CPred<"$_self.isa<::mlir::LLVM::CConvAttr>()">,
+    CPred<"::llvm::isa<::mlir::LLVM::CConvAttr>($_self)">,
     "LLVM Calling Convention specification"> {
   let storageType = "::mlir::LLVM::CConvAttr";
   let returnType = "::mlir::LLVM::cconv::CConv";
@@ -580,7 +580,7 @@ def LinkageEnum : LLVM_EnumAttr<
 
 def Linkage : DialectAttr<
     LLVM_Dialect,
-    CPred<"$_self.isa<::mlir::LLVM::LinkageAttr>()">,
+    CPred<"::llvm::isa<::mlir::LLVM::LinkageAttr>($_self)">,
     "LLVM Linkage specification"> {
   let storageType = "::mlir::LLVM::LinkageAttr";
   let returnType = "::mlir::LLVM::Linkage";

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index 9bf89b5b25ff8..9512765dbb062 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -30,7 +30,7 @@ def LLVM_Type : DialectType<LLVM_Dialect,
 
 // Type constraint accepting LLVM token type.
 def LLVM_TokenType : Type<
-  CPred<"$_self.isa<::mlir::LLVM::LLVMTokenType>()">,
+  CPred<"::llvm::isa<::mlir::LLVM::LLVMTokenType>($_self)">,
   "LLVM token type">,
   BuildableType<"::mlir::LLVM::LLVMTokenType::get($_builder.getContext())">;
 
@@ -38,12 +38,12 @@ def LLVM_TokenType : Type<
 // and function.
 def LLVM_PrimitiveType : Type<
   And<[LLVM_Type.predicate,
-       CPred<"!$_self.isa<::mlir::LLVM::LLVMVoidType, "
-                         "::mlir::LLVM::LLVMFunctionType>()">]>,
+       CPred<"!::llvm::isa<::mlir::LLVM::LLVMVoidType, "
+                         "::mlir::LLVM::LLVMFunctionType>($_self)">]>,
   "primitive LLVM type">;
 
 // Type constraint accepting any LLVM function type.
-def LLVM_FunctionType : Type<CPred<"$_self.isa<::mlir::LLVM::LLVMFunctionType>()">,
+def LLVM_FunctionType : Type<CPred<"::llvm::isa<::mlir::LLVM::LLVMFunctionType>($_self)">,
                          "LLVM function type", "::mlir::LLVM::LLVMFunctionType">;
 
 // Type constraint accepting any LLVM floating point type.
@@ -52,37 +52,37 @@ def LLVM_AnyFloat : Type<
   "floating point LLVM type">;
 
 // Type constraint accepting any LLVM pointer type.
-def LLVM_AnyPointer : Type<CPred<"$_self.isa<::mlir::LLVM::LLVMPointerType>()">,
+def LLVM_AnyPointer : Type<CPred<"::llvm::isa<::mlir::LLVM::LLVMPointerType>($_self)">,
                           "LLVM pointer type", "::mlir::LLVM::LLVMPointerType">;
 
 // Type constraint accepting LLVM pointer type with an additional constraint
 // on the element type.
 class LLVM_PointerTo<Type pointee> : Type<
   And<[LLVM_AnyPointer.predicate,
-       Or<[CPred<"$_self.cast<::mlir::LLVM::LLVMPointerType>().isOpaque()">,
+       Or<[CPred<"::llvm::cast<::mlir::LLVM::LLVMPointerType>($_self).isOpaque()">,
            SubstLeaves<
              "$_self",
-             "$_self.cast<::mlir::LLVM::LLVMPointerType>().getElementType()",
+             "::llvm::cast<::mlir::LLVM::LLVMPointerType>($_self).getElementType()",
              pointee.predicate>]>]>,
   "LLVM pointer to " # pointee.summary, "::mlir::LLVM::LLVMPointerType">;
 
 // Type constraints accepting LLVM pointer type to integer of a specific width.
 class LLVM_IntPtrBase<int width, int addressSpace = 0> : Type<
   And<[LLVM_PointerTo<I<width>>.predicate,
-       CPred<"$_self.cast<::mlir::LLVM::LLVMPointerType>().getAddressSpace()"
+       CPred<"::llvm::cast<::mlir::LLVM::LLVMPointerType>($_self).getAddressSpace()"
              " == " # addressSpace>]>,
   "LLVM pointer to " # I<width>.summary>;
 
 def LLVM_i8Ptr : LLVM_IntPtrBase<8>;
 
 // Type constraint accepting any LLVM structure type.
-def LLVM_AnyStruct : Type<CPred<"$_self.isa<::mlir::LLVM::LLVMStructType>()">,
+def LLVM_AnyStruct : Type<CPred<"::llvm::isa<::mlir::LLVM::LLVMStructType>($_self)">,
                          "LLVM structure type">;
 
 // Type constraint accepting opaque LLVM structure type.
 def LLVM_OpaqueStruct : Type<
   And<[LLVM_AnyStruct.predicate,
-       CPred<"$_self.cast<::mlir::LLVM::LLVMStructType>().isOpaque()">]>>;
+       CPred<"::llvm::cast<::mlir::LLVM::LLVMStructType>($_self).isOpaque()">]>>;
 
 // Type constraint accepting any LLVM type that can be loaded or stored, i.e. a
 // type that has size (not void, function or opaque struct type).
@@ -93,8 +93,8 @@ def LLVM_LoadableType : Type<
 
 // Type constraint accepting any LLVM aggregate type, i.e. structure or array.
 def LLVM_AnyAggregate : Type<
-  CPred<"$_self.isa<::mlir::LLVM::LLVMStructType, "
-                   "::mlir::LLVM::LLVMArrayType>()">,
+  CPred<"::llvm::isa<::mlir::LLVM::LLVMStructType, "
+                   "::mlir::LLVM::LLVMArrayType>($_self)">,
   "LLVM aggregate type">;
 
 // Type constraint accepting any LLVM non-aggregate type, i.e. not structure or
@@ -234,7 +234,7 @@ def LLVM_IntrPatterns {
     [{moduleTranslation.convertType(opInst.getResult($0).getType())}];
   string structResult =
     [{moduleTranslation.convertType(
-        opInst.getResult(0).getType().cast<LLVM::LLVMStructType>()
+        ::llvm::cast<LLVM::LLVMStructType>(opInst.getResult(0).getType())
               .getBody()[$0])}];
 }
 
@@ -421,7 +421,7 @@ def LLVM_VoidResultTypeOpBuilder :
     CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes),
   [{
     assert(isCompatibleType(resultType) && "result must be an LLVM type");
-    assert(resultType.isa<LLVMVoidType>() &&
+    assert(::llvm::isa<LLVMVoidType>(resultType) &&
            "for zero-result operands, only 'void' is accepted as result type");
     build($_builder, $_state, operands, attributes);
   }]>;

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index fefc602ae3729..3ead2d2867a6b 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -172,7 +172,7 @@ def LLVM_FNegOp : LLVM_UnaryFloatArithmeticOp<
   LLVM_ScalarOrVectorOf<LLVM_AnyFloat>, "fneg", "FNeg">;
 
 // Memory-related operations.
-def LLVM_AllocaOp : LLVM_Op<"alloca", 
+def LLVM_AllocaOp : LLVM_Op<"alloca",
     [DeclareOpInterfaceMethods<PromotableAllocationOpInterface>]>,
   LLVM_MemOpPatterns {
   let arguments = (ins AnyInteger:$arraySize,
@@ -186,7 +186,7 @@ def LLVM_AllocaOp : LLVM_Op<"alloca",
     auto addrSpace = $_resultType->getPointerAddressSpace();
     llvm::Type *elementType = moduleTranslation.convertType(
         $elem_type ? *$elem_type
-                   : op.getType().cast<LLVMPointerType>().getElementType());
+                   : ::llvm::cast<LLVMPointerType>(op.getType()).getElementType());
     auto *inst = builder.CreateAlloca(elementType, addrSpace, $arraySize);
     }] # setAlignmentCode # [{
     inst->setUsedWithInAlloca($inalloca);
@@ -206,7 +206,7 @@ def LLVM_AllocaOp : LLVM_Op<"alloca",
     OpBuilder<(ins "Type":$resultType, "Value":$arraySize,
                "unsigned":$alignment),
     [{
-      assert(!resultType.cast<LLVMPointerType>().isOpaque() &&
+      assert(!::llvm::cast<LLVMPointerType>(resultType).isOpaque() &&
              "pass the allocated type explicitly if opaque pointers are used");
       if (alignment == 0)
         return build($_builder, $_state, resultType, arraySize, IntegerAttr(),
@@ -218,7 +218,7 @@ def LLVM_AllocaOp : LLVM_Op<"alloca",
                CArg<"unsigned", "0">:$alignment),
     [{
       TypeAttr elemTypeAttr =
-          resultType.cast<LLVMPointerType>().isOpaque() ?
+          ::llvm::cast<LLVMPointerType>(resultType).isOpaque() ?
           TypeAttr::get(elementType) : TypeAttr();
       build($_builder, $_state, resultType, arraySize,
             alignment == 0 ? IntegerAttr()
@@ -287,7 +287,7 @@ def LLVM_GEPOp : LLVM_Op<"getelementptr", [Pure,
     GEPIndicesAdaptor<decltype($dynamicIndices)>
         gepIndices(op.getRawConstantIndicesAttr(), $dynamicIndices);
     for (PointerUnion<IntegerAttr, llvm::Value*> valueOrAttr : gepIndices) {
-      if (llvm::Value* value = valueOrAttr.dyn_cast<llvm::Value*>())
+      if (llvm::Value* value = ::llvm::dyn_cast<llvm::Value*>(valueOrAttr))
         indices.push_back(value);
       else
         indices.push_back(
@@ -1604,7 +1604,7 @@ def LLVM_LLVMFuncOp : LLVM_Op<"func", [
     /// type if it is not void, or an empty array if the function's return type
     /// is void, as void is not assignable to a value.
     ArrayRef<Type> getCallableResults() {
-      if (getFunctionType().getReturnType().isa<LLVM::LLVMVoidType>())
+      if (::llvm::isa<LLVM::LLVMVoidType>(getFunctionType().getReturnType()))
         return {};
       return getFunctionType().getReturnTypes();
     }

diff  --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
index ace2427f577cb..1aba722773511 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.td
@@ -307,7 +307,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*defaultImplementation=*/[{
         assert(opOperand->getOwner() == this->getOperation());
         if (auto shapedType =
-              opOperand->get().getType().template dyn_cast<ShapedType>())
+              ::llvm::dyn_cast<ShapedType>(opOperand->get().getType()))
           return shapedType.getRank();
         return 0;
       }]
@@ -359,7 +359,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*defaultImplementation=*/[{
         assert(opOperand->getOwner() == this->getOperation());
         if (auto shapedType =
-              opOperand->get().getType().template dyn_cast<ShapedType>())
+              ::llvm::dyn_cast<ShapedType>(opOperand->get().getType()))
           return shapedType.getShape();
         return {};
       }]
@@ -884,8 +884,8 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
 
     private:
     void setOperandSegmentAt(unsigned idx, unsigned val) {
-      auto attr = (*this)->getAttr("operand_segment_sizes")
-        .cast<DenseIntElementsAttr>();
+      auto attr = ::llvm::cast<DenseIntElementsAttr>(
+                      (*this)->getAttr("operand_segment_sizes"));
       unsigned i = 0;
       auto newAttr = attr.mapValues(IntegerType::get(getContext(), 32),
         [&](const APInt &v) { return (i++ == idx) ? APInt(32, val) : v; });

diff  --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
index b0c8537ff60f5..8500c4c26ab25 100644
--- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
+++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td
@@ -111,7 +111,7 @@ class AllocLikeOp<string mnemonic,
   let extraClassDeclaration = [{
     static StringRef getAlignmentAttrStrName() { return "alignment"; }
 
-    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
 
     SmallVector<OpFoldResult> getMixedSizes() {
       SmallVector<OpFoldResult> result;
@@ -293,7 +293,7 @@ def MemRef_ReallocOp : MemRef_Op<"realloc"> {
 
     let extraClassDeclaration = [{
     /// The result of a realloc is always a memref.
-    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
   }];
 
   let assemblyFormat = [{
@@ -699,7 +699,7 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
     Value getSrcMemRef() { return getOperand(0); }
     // Returns the rank (number of indices) of the source MemRefType.
     unsigned getSrcMemRefRank() {
-      return getSrcMemRef().getType().cast<MemRefType>().getRank();
+      return ::llvm::cast<MemRefType>(getSrcMemRef().getType()).getRank();
     }
     // Returns the source memref indices for this DMA operation.
     operand_range getSrcIndices() {
@@ -711,13 +711,13 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
     Value getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
     // Returns the rank (number of indices) of the destination MemRefType.
     unsigned getDstMemRefRank() {
-      return getDstMemRef().getType().cast<MemRefType>().getRank();
+      return ::llvm::cast<MemRefType>(getDstMemRef().getType()).getRank();
     }
     unsigned getSrcMemorySpace() {
-      return getSrcMemRef().getType().cast<MemRefType>().getMemorySpaceAsInt();
+      return ::llvm::cast<MemRefType>(getSrcMemRef().getType()).getMemorySpaceAsInt();
     }
     unsigned getDstMemorySpace() {
-      return getDstMemRef().getType().cast<MemRefType>().getMemorySpaceAsInt();
+      return ::llvm::cast<MemRefType>(getDstMemRef().getType()).getMemorySpaceAsInt();
     }
 
     // Returns the destination memref indices for this DMA operation.
@@ -739,7 +739,7 @@ def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
 
     // Returns the rank (number of indices) of the tag MemRefType.
     unsigned getTagMemRefRank() {
-      return getTagMemRef().getType().cast<MemRefType>().getRank();
+      return ::llvm::cast<MemRefType>(getTagMemRef().getType()).getRank();
     }
 
     // Returns the tag memref index for this DMA operation.
@@ -839,7 +839,7 @@ def MemRef_DmaWaitOp : MemRef_Op<"dma_wait"> {
   let extraClassDeclaration = [{
     /// Returns the rank (number of indices) of the tag memref.
     unsigned getTagMemRefRank() {
-      return getTagMemRef().getType().cast<MemRefType>().getRank();
+      return ::llvm::cast<MemRefType>(getTagMemRef().getType()).getRank();
     }
     void getEffects(
         SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>> &
@@ -988,7 +988,7 @@ def GenericAtomicRMWOp : MemRef_Op<"generic_atomic_rmw", [
       SingleBlockImplicitTerminator<"AtomicYieldOp">,
       TypesMatchWith<"result type matches element type of memref",
                      "memref", "result",
-                     "$_self.cast<MemRefType>().getElementType()">
+                     "::llvm::cast<MemRefType>($_self).getElementType()">
     ]> {
   let summary = "atomic read-modify-write operation with a region";
   let description = [{
@@ -1035,7 +1035,7 @@ def GenericAtomicRMWOp : MemRef_Op<"generic_atomic_rmw", [
       return getRegion().getArgument(0);
     }
     MemRefType getMemRefType() {
-      return getMemref().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemref().getType());
     }
   }];
   let hasCustomAssemblyFormat = 1;
@@ -1144,7 +1144,7 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
   let extraClassDeclaration = [{
      bool isExternal() { return !getInitialValue(); }
      bool isUninitialized() {
-       return !isExternal() && getInitialValue()->isa<UnitAttr>();
+       return !isExternal() && ::llvm::isa<UnitAttr>(*getInitialValue());
      }
      /// Returns the constant initial value if the memref.global is a constant,
      /// or null otherwise.
@@ -1160,7 +1160,7 @@ def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
 def LoadOp : MemRef_Op<"load",
      [TypesMatchWith<"result type matches element type of 'memref'",
                      "memref", "result",
-                     "$_self.cast<MemRefType>().getElementType()">,
+                     "::llvm::cast<MemRefType>($_self).getElementType()">,
       MemRefsNormalizable,
       DeclareOpInterfaceMethods<PromotableMemOpInterface>]> {
   let summary = "load operation";
@@ -1211,7 +1211,7 @@ def LoadOp : MemRef_Op<"load",
     Value getMemRef() { return getOperand(0); }
     void setMemRef(Value value) { setOperand(0, value); }
     MemRefType getMemRefType() {
-      return getMemRef().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemRef().getType());
     }
   }];
 
@@ -1300,7 +1300,7 @@ def MemRef_PrefetchOp : MemRef_Op<"prefetch"> {
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getMemref().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemref().getType());
     }
     static StringRef getLocalityHintAttrStrName() { return "localityHint"; }
     static StringRef getIsWriteAttrStrName() { return "isWrite"; }
@@ -1519,7 +1519,7 @@ def MemRef_ReshapeOp: MemRef_Op<"reshape", [
      }]>];
 
   let extraClassDeclaration = [{
-    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
     Value getViewSource() { return getSource(); }
   }];
 
@@ -1548,15 +1548,15 @@ class MemRef_ReassociativeReshapeOp<string mnemonic, list<Trait> traits = []> :
       SmallVector<ReassociationIndices, 4> reassociationIndices;
       for (auto attr : getReassociation())
         reassociationIndices.push_back(llvm::to_vector<2>(
-            llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
-              return indexAttr.cast<IntegerAttr>().getInt();
+            llvm::map_range(::llvm::cast<ArrayAttr>(attr), [&](Attribute indexAttr) {
+              return ::llvm::cast<IntegerAttr>(indexAttr).getInt();
             })));
       return reassociationIndices;
     };
 
-    MemRefType getSrcType() { return getSrc().getType().cast<MemRefType>(); }
+    MemRefType getSrcType() { return ::llvm::cast<MemRefType>(getSrc().getType()); }
 
-    MemRefType getResultType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getResultType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
 
     Value getViewSource() { return getSrc(); }
   }];
@@ -1750,7 +1750,7 @@ def MemRef_CollapseShapeOp : MemRef_ReassociativeReshapeOp<"collapse_shape", [
 def MemRef_StoreOp : MemRef_Op<"store",
      [TypesMatchWith<"type of 'value' matches element type of 'memref'",
                      "memref", "value",
-                     "$_self.cast<MemRefType>().getElementType()">,
+                     "::llvm::cast<MemRefType>($_self).getElementType()">,
       MemRefsNormalizable,
       DeclareOpInterfaceMethods<PromotableMemOpInterface>]> {
   let summary = "store operation";
@@ -1801,7 +1801,7 @@ def MemRef_StoreOp : MemRef_Op<"store",
       Value getMemRef() { return getOperand(1); }
       void setMemRef(Value value) { setOperand(1, value); }
       MemRefType getMemRefType() {
-        return getMemRef().getType().cast<MemRefType>();
+        return ::llvm::cast<MemRefType>(getMemRef().getType());
       }
   }];
 
@@ -2010,11 +2010,11 @@ def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [
   let extraClassDeclaration = extraBaseClassDeclaration # [{
     /// Returns the type of the base memref operand.
     MemRefType getSourceType() {
-      return getSource().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getSource().getType());
     }
 
     /// The result of a subview is always a memref.
-    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
 
     /// A subview result type can be fully inferred from the source type and the
     /// static representation of offsets, sizes and strides. Special sentinels
@@ -2194,7 +2194,7 @@ def MemRef_ViewOp : MemRef_Op<"view", [
 
   let extraClassDeclaration = [{
     /// The result of a view is always a memref.
-    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
+    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
 
     /// Returns the dynamic sizes for this view operation. This is redundant
     /// with `sizes` but needed in template implementations. More specifically:
@@ -2225,7 +2225,7 @@ def AtomicRMWOp : MemRef_Op<"atomic_rmw", [
       AllTypesMatch<["value", "result"]>,
       TypesMatchWith<"value type matches element type of memref",
                      "memref", "value",
-                     "$_self.cast<MemRefType>().getElementType()">
+                     "::llvm::cast<MemRefType>($_self).getElementType()">
     ]> {
   let summary = "atomic read-modify-write operation";
   let description = [{
@@ -2258,7 +2258,7 @@ def AtomicRMWOp : MemRef_Op<"atomic_rmw", [
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getMemref().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemref().getType());
     }
   }];
   let hasFolder = 1;

diff  --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
index 3264537424925..5bb02b082575a 100644
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
@@ -124,9 +124,9 @@ class NVGPU_MmaSyncOp<string mnemonic> :
     std::array<int64_t, 3> getMmaShapeAsArray() {
       ArrayAttr mmaShape = this->getMmaShape();
       assert(mmaShape.size() == 3 && "mmaShape should be three integers");
-      return {mmaShape[0].cast<IntegerAttr>().getInt(),
-              mmaShape[1].cast<IntegerAttr>().getInt(),
-              mmaShape[2].cast<IntegerAttr>().getInt()};
+      return {::llvm::cast<IntegerAttr>(mmaShape[0]).getInt(),
+              ::llvm::cast<IntegerAttr>(mmaShape[1]).getInt(),
+              ::llvm::cast<IntegerAttr>(mmaShape[2]).getInt()};
     }
   }];
 

diff  --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
index b6a292e212963..f43eead18b300 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
@@ -49,7 +49,7 @@ def ReductionClauseInterface : OpInterface<"ReductionClauseInterface"> {
 
 def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
   let description = [{
-    Operations that represent a module for offloading (host or device) 
+    Operations that represent a module for offloading (host or device)
     should have this interface.
   }];
 
@@ -58,7 +58,7 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
   let methods = [
     InterfaceMethod<
       /*description=*/[{
-        Set the attribute IsDeviceAttr on the current module with the 
+        Set the attribute IsDeviceAttr on the current module with the
         specified boolean argument.
       }],
       /*retTy=*/"void",
@@ -77,29 +77,29 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
       /*methodName=*/"getIsDevice",
       (ins), [{}], [{
         if (Attribute isDevice = $_op->getAttr("omp.is_device"))
-          if (isDevice.isa<mlir::omp::IsDeviceAttr>())
-            return isDevice.dyn_cast<IsDeviceAttr>().getIsDevice();
+          if (::llvm::isa<mlir::omp::IsDeviceAttr>(isDevice))
+            return ::llvm::dyn_cast<IsDeviceAttr>(isDevice).getIsDevice();
         return false;
       }]>,
       InterfaceMethod<
       /*description=*/[{
-        Get the FlagsAttr attribute on the current module if it exists 
+        Get the FlagsAttr attribute on the current module if it exists
         and return the attribute, if it doesn't exit it returns a nullptr
       }],
       /*retTy=*/"mlir::omp::FlagsAttr",
-      /*methodName=*/"getFlags", 
+      /*methodName=*/"getFlags",
       (ins), [{}], [{
         if (Attribute flags = $_op->getAttr("omp.flags"))
           return flags.dyn_cast_or_null<mlir::omp::FlagsAttr>();
         return nullptr;
-      }]>,  
+      }]>,
       InterfaceMethod<
       /*description=*/[{
-        Apply an omp.FlagsAttr to a module with the specified values 
+        Apply an omp.FlagsAttr to a module with the specified values
         for the flags
       }],
       /*retTy=*/"void",
-      /*methodName=*/"setFlags", 
+      /*methodName=*/"setFlags",
       (ins "uint32_t":$debugKind,
             "bool":$assumeTeamsOversubscription,
             "bool":$assumeThreadsOversubscription,
@@ -107,7 +107,7 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
             "bool":$assumeNoNestedParallelism), [{}], [{
         $_op->setAttr(("omp." + mlir::omp::FlagsAttr::getMnemonic()).str(),
                   mlir::omp::FlagsAttr::get($_op->getContext(), debugKind,
-                      assumeTeamsOversubscription, assumeThreadsOversubscription, 
+                      assumeTeamsOversubscription, assumeThreadsOversubscription,
                       assumeNoThreadState, assumeNoNestedParallelism));
       }]>,
     InterfaceMethod<
@@ -140,12 +140,12 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
       }]>,
       InterfaceMethod<
       /*description=*/[{
-        Set a StringAttr on the current module containing the host IR file path. This 
+        Set a StringAttr on the current module containing the host IR file path. This
         file path is used in two-phase compilation during the device phase to generate
-        device side LLVM IR when lowering MLIR. 
+        device side LLVM IR when lowering MLIR.
       }],
       /*retTy=*/"void",
-      /*methodName=*/"setHostIRFilePath", 
+      /*methodName=*/"setHostIRFilePath",
       (ins "std::string":$hostIRFilePath), [{}], [{
         $_op->setAttr(
           mlir::StringAttr::get($_op->getContext(), llvm::Twine{"omp.host_ir_filepath"}),
@@ -153,17 +153,17 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
        }]>,
       InterfaceMethod<
       /*description=*/[{
-        Find the host-ir file path StringAttr from the current module if it exists and 
-        return its contained value, if it doesn't exist it returns an empty string. This 
+        Find the host-ir file path StringAttr from the current module if it exists and
+        return its contained value, if it doesn't exist it returns an empty string. This
         file path is used in two-phase compilation during the device phase to generate
-        device side LLVM IR when lowering MLIR. 
+        device side LLVM IR when lowering MLIR.
       }],
       /*retTy=*/"llvm::StringRef",
-      /*methodName=*/"getHostIRFilePath", 
+      /*methodName=*/"getHostIRFilePath",
       (ins), [{}], [{
         if (Attribute filepath = $_op->getAttr("omp.host_ir_filepath"))
-          if (filepath.isa<mlir::StringAttr>())
-            return filepath.dyn_cast<mlir::StringAttr>().getValue();
+          if (::llvm::isa<mlir::StringAttr>(filepath))
+            return ::llvm::dyn_cast<mlir::StringAttr>(filepath).getValue();
         return {};
       }]>
   ];

diff --git a/mlir/include/mlir/Dialect/PDL/IR/PDLTypes.td b/mlir/include/mlir/Dialect/PDL/IR/PDLTypes.td
index d690fca937a9f..721668edead62 100644
--- a/mlir/include/mlir/Dialect/PDL/IR/PDLTypes.td
+++ b/mlir/include/mlir/Dialect/PDL/IR/PDLTypes.td
@@ -100,13 +100,13 @@ def PDL_Value : PDL_Type<"Value", "value"> {
 //===----------------------------------------------------------------------===//
 
 def PDL_AnyType : Type<
-  CPred<"$_self.isa<::mlir::pdl::PDLType>()">, "pdl type",
+  CPred<"::llvm::isa<::mlir::pdl::PDLType>($_self)">, "pdl type",
         "::mlir::pdl::PDLType">;
 
 // A range of positional values of one of the provided types.
 class PDL_RangeOf<Type positionalType> :
   ContainerType<AnyTypeOf<[positionalType]>, PDL_Range.predicate,
-                "$_self.cast<::mlir::pdl::RangeType>().getElementType()",
+                "::llvm::cast<::mlir::pdl::RangeType>($_self).getElementType()",
                 "range", "::mlir::pdl::RangeType">,
     BuildableType<"::mlir::pdl::RangeType::get(" # positionalType.builderCall #
                   ")">;

diff --git a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
index 1e520ea61af0a..efb75f06970da 100644
--- a/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
+++ b/mlir/include/mlir/Dialect/PDLInterp/IR/PDLInterpOps.td
@@ -559,7 +559,7 @@ def PDLInterp_ExtractOp
   let builders = [
     OpBuilder<(ins "Value":$range, "unsigned":$index), [{
       build($_builder, $_state,
-            range.getType().cast<pdl::RangeType>().getElementType(),
+            ::llvm::cast<pdl::RangeType>(range.getType()).getElementType(),
             range, index);
     }]>,
   ];
@@ -964,7 +964,7 @@ def PDLInterp_GetValueTypeOp : PDLInterp_Op<"get_value_type", [Pure,
       Type valType = value.getType();
       Type typeType = $_builder.getType<pdl::TypeType>();
       build($_builder, $_state,
-            valType.isa<pdl::RangeType>() ? pdl::RangeType::get(typeType)
+            ::llvm::isa<pdl::RangeType>(valType) ? pdl::RangeType::get(typeType)
                                           : typeType,
             value);
     }]>

diff --git a/mlir/include/mlir/Dialect/Quant/QuantOpsBase.td b/mlir/include/mlir/Dialect/Quant/QuantOpsBase.td
index f5f1ce86e08c7..559ec9c72552e 100644
--- a/mlir/include/mlir/Dialect/Quant/QuantOpsBase.td
+++ b/mlir/include/mlir/Dialect/Quant/QuantOpsBase.td
@@ -35,7 +35,7 @@ class quant_TypedPrimitiveOrContainer<Type etype> :
 
 // An implementation of QuantizedType.
 def quant_QuantizedType :
-    Type<CPred<"$_self.isa<mlir::quant::QuantizedType>()">, "QuantizedType">;
+    Type<CPred<"::llvm::isa<mlir::quant::QuantizedType>($_self)">, "QuantizedType">;
 
 // A primitive type that can represent a real value. This is either a
 // floating point value or a quantized type.
@@ -65,7 +65,7 @@ def quant_RealOrStorageValueType :
 // An implementation of UniformQuantizedType.
 def quant_UniformQuantizedType :
     DialectType<Quantization_Dialect,
-                CPred<"$_self.isa<UniformQuantizedType>()">,
+                CPred<"::llvm::isa<UniformQuantizedType>($_self)">,
                 "UniformQuantizedType">;
 
 // Predicate for detecting a container or primitive of UniformQuantizedType.

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 58c7c2ab985ee..26e47f49af055 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4046,7 +4046,7 @@ def SPIRV_MatrixLayoutAttr  :
 
 def SPIRV_VerCapExtAttr : DialectAttr<
     SPIRV_Dialect,
-    CPred<"$_self.isa<::mlir::spirv::VerCapExtAttr>()">,
+    CPred<"::llvm::isa<::mlir::spirv::VerCapExtAttr>($_self)">,
     "version-capability-extension attribute"> {
   let storageType = "::mlir::spirv::VerCapExtAttr";
   let returnType = "::mlir::spirv::VerCapExtAttr";
@@ -4068,17 +4068,17 @@ class SignlessOrUnsignedIntOfWidths<list<int> widths> :
     AnyTypeOf<!foreach(w, widths, IOrUI<w>),
               !interleave(widths, "/") # "-bit signless/unsigned integer">;
 
-def SPIRV_IsArrayType : CPred<"$_self.isa<::mlir::spirv::ArrayType>()">;
+def SPIRV_IsArrayType : CPred<"::llvm::isa<::mlir::spirv::ArrayType>($_self)">;
 def SPIRV_IsCooperativeMatrixType :
-  CPred<"$_self.isa<::mlir::spirv::CooperativeMatrixNVType>()">;
-def SPIRV_IsImageType : CPred<"$_self.isa<::mlir::spirv::ImageType>()">;
+  CPred<"::llvm::isa<::mlir::spirv::CooperativeMatrixNVType>($_self)">;
+def SPIRV_IsImageType : CPred<"::llvm::isa<::mlir::spirv::ImageType>($_self)">;
 def SPIRV_IsJointMatrixType :
-  CPred<"$_self.isa<::mlir::spirv::JointMatrixINTELType>()">;
-def SPIRV_IsMatrixType : CPred<"$_self.isa<::mlir::spirv::MatrixType>()">;
-def SPIRV_IsPtrType : CPred<"$_self.isa<::mlir::spirv::PointerType>()">;
-def SPIRV_IsRTArrayType : CPred<"$_self.isa<::mlir::spirv::RuntimeArrayType>()">;
-def SPIRV_IsSampledImageType : CPred<"$_self.isa<::mlir::spirv::SampledImageType>()">;
-def SPIRV_IsStructType : CPred<"$_self.isa<::mlir::spirv::StructType>()">;
+  CPred<"::llvm::isa<::mlir::spirv::JointMatrixINTELType>($_self)">;
+def SPIRV_IsMatrixType : CPred<"::llvm::isa<::mlir::spirv::MatrixType>($_self)">;
+def SPIRV_IsPtrType : CPred<"::llvm::isa<::mlir::spirv::PointerType>($_self)">;
+def SPIRV_IsRTArrayType : CPred<"::llvm::isa<::mlir::spirv::RuntimeArrayType>($_self)">;
+def SPIRV_IsSampledImageType : CPred<"::llvm::isa<::mlir::spirv::SampledImageType>($_self)">;
+def SPIRV_IsStructType : CPred<"::llvm::isa<::mlir::spirv::StructType>($_self)">;
 
 
 // See https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html#_types
@@ -4134,12 +4134,12 @@ def SPIRV_SignlessOrUnsignedInt : SignlessOrUnsignedIntOfWidths<[8, 16, 32, 64]>
 
 class SPIRV_CoopMatrixOfType<list<Type> allowedTypes> :
   ContainerType<AnyTypeOf<allowedTypes>, SPIRV_IsCooperativeMatrixType,
-    "$_self.cast<::mlir::spirv::CooperativeMatrixNVType>().getElementType()",
+    "::llvm::cast<::mlir::spirv::CooperativeMatrixNVType>($_self).getElementType()",
     "Cooperative Matrix">;
 
 class SPIRV_JointMatrixOfType<list<Type> allowedTypes> :
   ContainerType<AnyTypeOf<allowedTypes>, SPIRV_IsJointMatrixType,
-    "$_self.cast<::mlir::spirv::JointMatrixINTELType>().getElementType()",
+    "::llvm::cast<::mlir::spirv::JointMatrixINTELType>($_self).getElementType()",
     "Joint Matrix">;
 
 class SPIRV_ScalarOrVectorOf<Type type> :

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
index 4feca83eee6bb..b8307b488af6f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
@@ -179,7 +179,7 @@ def SPIRV_VectorExtractDynamicOp : SPIRV_Op<"VectorExtractDynamic", [
     Pure,
     TypesMatchWith<"type of 'result' matches element type of 'vector'",
                    "vector", "result",
-                   "$_self.cast<mlir::VectorType>().getElementType()">]> {
+                   "::llvm::cast<mlir::VectorType>($_self).getElementType()">]> {
   let summary = [{
     Extract a single, dynamically selected, component of a vector.
   }];
@@ -228,7 +228,7 @@ def SPIRV_VectorInsertDynamicOp : SPIRV_Op<"VectorInsertDynamic", [
     TypesMatchWith<
       "type of 'component' matches element type of 'vector'",
       "vector", "component",
-      "$_self.cast<mlir::VectorType>().getElementType()">,
+      "::llvm::cast<mlir::VectorType>($_self).getElementType()">,
     AllTypesMatch<["vector", "result"]>]> {
   let summary = [{
     Make a copy of a vector, with a single, variably selected, component

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
index 5d7338d7813bf..9b80354d19ded 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
@@ -147,7 +147,7 @@ def SPIRV_ImageOp : SPIRV_Op<"Image",
     [Pure,
      TypesMatchWith<"type of 'result' matches image type of 'sampledimage'",
                     "sampledimage", "result",
-                    "$_self.cast<spirv::SampledImageType>().getImageType()">]> {
+                    "::llvm::cast<spirv::SampledImageType>($_self).getImageType()">]> {
   let summary = "Extract the image from a sampled image.";
 
   let description = [{

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
index 78944e809899e..a7ca0147c029e 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
@@ -426,7 +426,7 @@ def SPIRV_GlobalVariableOp : SPIRV_Op<"GlobalVariable", [InModuleScope, Symbol]>
 
   let extraClassDeclaration = [{
     ::mlir::spirv::StorageClass storageClass() {
-      return this->getType().cast<::mlir::spirv::PointerType>().getStorageClass();
+      return ::llvm::cast<::mlir::spirv::PointerType>(this->getType()).getStorageClass();
     }
   }];
 }

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 5ba7ff8744795..931f917a04be9 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -35,9 +35,9 @@ class SparseTensor_Attr<string name,
 def DimensionAttr :
     TypedAttrBase<
       Index, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()"
-                 ".isa<::mlir::IndexType>()">]>,
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::isa<::mlir::IndexType>("
+                     "::llvm::cast<::mlir::IntegerAttr>($_self).getType())">]>,
       "dimension attribute"> {
   let returnType = [{::mlir::sparse_tensor::Dimension}];
   let convertFromStorage = [{$_self.getValue().getZExtValue()}];
@@ -46,9 +46,9 @@ def DimensionAttr :
 def LevelAttr :
     TypedAttrBase<
       Index, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()"
-                 ".isa<::mlir::IndexType>()">]>,
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::isa<::mlir::IndexType>("
+                     "::llvm::cast<::mlir::IntegerAttr>($_self).getType())">]>,
       "level attribute"> {
   let returnType = [{::mlir::sparse_tensor::Level}];
   let convertFromStorage = [{$_self.getValue().getZExtValue()}];

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index f29ea600e3347..ab630b80a7941 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -558,7 +558,7 @@ def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate", [Pure]>,
 def SparseTensor_InsertOp : SparseTensor_Op<"insert",
     [TypesMatchWith<"value type matches element type of tensor",
                     "tensor", "value",
-                    "$_self.cast<ShapedType>().getElementType()">,
+                    "::llvm::cast<ShapedType>($_self).getElementType()">,
      AllTypesMatch<["tensor", "result"]>]>,
     Arguments<(ins AnyType:$value,
                AnySparseTensor:$tensor,
@@ -600,7 +600,7 @@ def SparseTensor_InsertOp : SparseTensor_Op<"insert",
 def SparseTensor_PushBackOp : SparseTensor_Op<"push_back",
     [TypesMatchWith<"value type matches element type of inBuffer",
                     "inBuffer", "value",
-                    "$_self.cast<ShapedType>().getElementType()">,
+                    "::llvm::cast<ShapedType>($_self).getElementType()">,
      AllTypesMatch<["inBuffer", "outBuffer"]>]>,
     Arguments<(ins Index:$curSize,
                StridedMemRefRankOf<[AnyType], [1]>:$inBuffer,

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
index 9a8c13ea810c2..654c6aff0c64d 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorTypes.td
@@ -73,10 +73,10 @@ def SparseTensor_StorageSpecifier : SparseTensor_Type<"StorageSpecifier"> {
 }
 
 def IsSparseTensorStorageSpecifierTypePred
-    : CPred<"$_self.isa<::mlir::sparse_tensor::StorageSpecifierType>()">;
+    : CPred<"::llvm::isa<::mlir::sparse_tensor::StorageSpecifierType>($_self)">;
 
 def SparseTensorStorageSpecifier
-    : Type<CPred<"$_self.isa<::mlir::sparse_tensor::StorageSpecifierType>()">, "metadata",
+    : Type<CPred<"::llvm::isa<::mlir::sparse_tensor::StorageSpecifierType>($_self)">, "metadata",
           "::mlir::sparse_tensor::StorageSpecifierType">;
 
 #endif // SPARSETENSOR_TYPES

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
index e0e97e7efdcc5..c7b65be888bf9 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
+++ b/mlir/include/mlir/Dialect/Tensor/IR/TensorOps.td
@@ -31,13 +31,13 @@ class Tensor_OpWithOffsetSizesAndStrides<string mnemonic,
     : Tensor_Op<mnemonic, traits> {
   code extraBaseClassDeclaration = [{
     /// Return the type of the base tensor operand.
-    ::mlir::RankedTensorType getSourceType() { 
-      return getSource().getType().cast<RankedTensorType>();
+    ::mlir::RankedTensorType getSourceType() {
+      return ::llvm::cast<RankedTensorType>(getSource().getType());
     }
 
     /// Return the type of the result tensor.
-    ::mlir::RankedTensorType getResultType() { 
-      return getResult().getType().cast<RankedTensorType>();
+    ::mlir::RankedTensorType getResultType() {
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
 
     /// Return the dynamic sizes for this subview operation if specified.
@@ -65,7 +65,7 @@ def Tensor_BitcastOp : Tensor_Op<"bitcast", [
   let description = [{
     Bitcast a tensor from one type to another type of equivalent element width.
     If both are ranked, then the rank should be the same and static dimensions
-    should match. 
+    should match.
 
     Example:
 
@@ -216,7 +216,7 @@ def Tensor_EmptyOp : Tensor_Op<"empty",
 
   let extraClassDeclaration = [{
     RankedTensorType getType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
 
     // Return both static and dynamic sizes as a list of `OpFoldResult`.
@@ -255,7 +255,7 @@ def Tensor_ExtractOp : Tensor_Op<"extract", [
     Pure,
     TypesMatchWith<"result type matches element type of tensor",
                    "tensor", "result",
-                   "$_self.cast<TensorType>().getElementType()">]> {
+                   "::llvm::cast<TensorType>($_self).getElementType()">]> {
   let summary = "element extraction operation";
   let description = [{
     The `tensor.extract` op reads a ranked tensor and returns one element as
@@ -487,8 +487,8 @@ def Tensor_FromElementsOp : Tensor_Op<"from_elements", [
     Pure,
     TypesMatchWith<"operand types match result element type",
                    "result", "elements", "SmallVector<Type, 2>("
-                   "$_self.cast<RankedTensorType>().getNumElements(), "
-                   "$_self.cast<RankedTensorType>().getElementType())">
+                   "::llvm::cast<RankedTensorType>($_self).getNumElements(), "
+                   "::llvm::cast<RankedTensorType>($_self).getElementType())">
   ]> {
   let summary = "tensor from elements operation.";
   let description = [{
@@ -649,13 +649,13 @@ def Tensor_GatherOp : Tensor_Op<"gather", [
                                             ArrayRef<int64_t> gatherDims,
                                             bool rankReduced);
     RankedTensorType getIndicesType() {
-      return getIndices().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getIndices().getType());
     }
     RankedTensorType getSourceType() {
-      return getSource().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getSource().getType());
     }
     RankedTensorType getResultType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
   }];
   let hasVerifier = 1;
@@ -721,7 +721,7 @@ def Tensor_InsertOp : Tensor_Op<"insert", [
                    "$_self">,
     TypesMatchWith<"scalar type matches element type of dest",
                    "dest", "scalar",
-                   "$_self.cast<TensorType>().getElementType()">]> {
+                   "::llvm::cast<TensorType>($_self).getElementType()">]> {
   let summary = "element insertion operation";
   let description = [{
     The `tensor.insert` op inserts a scalar into a ranked tensor `dest` as
@@ -983,7 +983,7 @@ def Tensor_ReshapeOp: Tensor_Op<"reshape", [
      }]>];
 
   let extraClassDeclaration = [{
-    TensorType getResultType() { return getResult().getType().cast<TensorType>(); }
+    TensorType getResultType() { return ::llvm::cast<TensorType>(getResult().getType()); }
   }];
 
   let assemblyFormat = [{
@@ -1011,16 +1011,16 @@ class Tensor_ReassociativeReshapeOp<string mnemonic, list<Trait> traits = []> :
       SmallVector<ReassociationIndices, 4> reassociationIndices;
       for (auto attr : getReassociation())
         reassociationIndices.push_back(llvm::to_vector<2>(
-            llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
-              return indexAttr.cast<IntegerAttr>().getInt();
+            llvm::map_range(::llvm::cast<ArrayAttr>(attr), [&](Attribute indexAttr) {
+              return ::llvm::cast<IntegerAttr>(indexAttr).getInt();
             })));
       return reassociationIndices;
     }
     RankedTensorType getSrcType() {
-      return getSrc().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getSrc().getType());
     }
     RankedTensorType getResultType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
   }];
 
@@ -1144,11 +1144,11 @@ def Tensor_CollapseShapeOp : Tensor_ReassociativeReshapeOp<"collapse_shape"> {
     }]>
   ];
 
-  let extraClassDeclaration = commonExtraClassDeclaration # [{  
+  let extraClassDeclaration = commonExtraClassDeclaration # [{
     static RankedTensorType
     inferCollapsedType(RankedTensorType type, ArrayRef<AffineMap> reassociation);
     static RankedTensorType
-    inferCollapsedType(RankedTensorType type, 
+    inferCollapsedType(RankedTensorType type,
                        SmallVector<ReassociationIndices> reassociation);
   }];
   let hasVerifier = 1;
@@ -1266,10 +1266,10 @@ def Tensor_PadOp : Tensor_Op<"pad", [
     }
 
     RankedTensorType getSourceType() {
-      return getSource().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getSource().getType());
     }
     RankedTensorType getResultType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
 
     // Infer the shape of the result tensor given the type of the source tensor
@@ -1449,11 +1449,11 @@ def Tensor_ParallelInsertSliceOp : Tensor_Op<"parallel_insert_slice", [
     Type yieldedType() { return getDest().getType(); }
 
     RankedTensorType getSourceType() {
-      return getSource().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getSource().getType());
     }
 
     RankedTensorType getDestType() {
-      return getDest().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getDest().getType());
     }
 
     ParallelCombiningOpInterface getParallelCombiningParent() {
@@ -1629,16 +1629,16 @@ def Tensor_ScatterOp : Tensor_Op<"scatter", [
 
   let extraClassDeclaration = [{
     RankedTensorType getDestType() {
-      return getDest().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getDest().getType());
     }
     RankedTensorType getIndicesType() {
-      return getIndices().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getIndices().getType());
     }
     RankedTensorType getSourceType() {
-      return getSource().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getSource().getType());
     }
     RankedTensorType getResultType() {
-      return getResult().getType().cast<RankedTensorType>();
+      return ::llvm::cast<RankedTensorType>(getResult().getType());
     }
   }];
   let hasVerifier = 1;
@@ -1653,7 +1653,7 @@ def Tensor_SplatOp : Tensor_Op<"splat", [
     Pure,
     TypesMatchWith<"operand type matches element type of result",
                    "aggregate", "input",
-                   "$_self.cast<TensorType>().getElementType()">
+                   "::llvm::cast<TensorType>($_self).getElementType()">
   ]> {
   let summary = "tensor splat or broadcast operation";
   let description = [{
@@ -1710,9 +1710,9 @@ class Tensor_RelayoutOp<string mnemonic, list<Trait> traits = []> :
     size_t getSourceRank() { return getSourceType().getRank(); };
     size_t getDestRank() { return getDestType().getRank(); };
     RankedTensorType getSourceType() {
-      return getSource().getType().cast<RankedTensorType>(); };
+      return ::llvm::cast<RankedTensorType>(getSource().getType()); };
     RankedTensorType getDestType() {
-      return getDest().getType().cast<RankedTensorType>(); };
+      return ::llvm::cast<RankedTensorType>(getDest().getType()); };
 
     /// Return position for init operand. Init operand is `dest`.
     std::pair<int64_t, int64_t> getDpsInitsPositionRange() {
@@ -1833,14 +1833,14 @@ def Tensor_PackOp : Tensor_RelayoutOp<"pack", [
         ArrayRef<int64_t> innerDimsPos, ArrayRef<int64_t> outerDimsPerm);
 
     /// Build and return a new PackOp that is a clone of the current PackOp with
-    /// (innerDimsPos, innerTiles) (resp. outerDimsPerm) are permuted by 
+    /// (innerDimsPos, innerTiles) (resp. outerDimsPerm) are permuted by
     /// innerPermutation (resp. outerPermutation).
     /// A new `tensor.empty` of the proper shape is built in the process.
     /// Asserts that:
     ///   - At least one of innerPermutation or outerPermutation is non-empty.
     ///   - If not empty, innerPermutation is a valid permutation of size
     ///     matching innerDimPos.
-    ///   - If not empty, outerPermutation is a valid permutation of size 
+    ///   - If not empty, outerPermutation is a valid permutation of size
     ///     matching outerDimsPerm.
     PackOp createTransposedClone(OpBuilder &b,
                                  Location loc,
@@ -1915,13 +1915,13 @@ def Tensor_UnPackOp : Tensor_RelayoutOp<"unpack"> {
         ArrayRef<int64_t> innerDimsPos, ArrayRef<int64_t> outerDimsPerm);
 
     /// Build and return a new UnPackOp that is a clone of the current UnPackOp
-    /// with (innerDimsPos, innerTiles) (resp. outerDimsPerm) are permuted by 
+    /// with (innerDimsPos, innerTiles) (resp. outerDimsPerm) are permuted by
     /// innerPermutation (resp. outerPermutation).
     /// Asserts that:
     ///   - At least one of innerPermutation or outerPermutation is non-empty.
     ///   - If not empty, innerPermutation is a valid permutation of size
     ///     matching innerDimPos.
-    ///   - If not empty, outerPermutation is a valid permutation of size 
+    ///   - If not empty, outerPermutation is a valid permutation of size
     ///     matching outerDimsPerm.
     UnPackOp createTransposedClone(OpBuilder &b,
                                    Location loc,

diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
index 4e8aef11c67e3..3f166cc5060a9 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
@@ -24,8 +24,8 @@ include "mlir/IR/OpBase.td"
 // Where low and high ends are 0,255 when unsigned, -128,127 when signed, for
 // the 8-bit case.
 class Tosa_QuantizedType<string n, list<int> params, bit signed>
-  : Type<And<[CPred<"$_self.isa<mlir::quant::QuantizedType>()">,
-              CPred<"$_self.cast<mlir::quant::QuantizedType>()" #
+  : Type<And<[CPred<"::llvm::isa<mlir::quant::QuantizedType>($_self)">,
+              CPred<"::llvm::cast<mlir::quant::QuantizedType>($_self)" #
                     ".getStorageTypeIntegralWidth() == " # !head(params)>]>,
     "Q" # !if (signed, "int", "uint") # !head(params) # " type"> {
   string name = n;
@@ -170,7 +170,7 @@ def Tosa_Int64Like : Tosa_TypeLike<[Tosa_Int64], "signless-integer-64-bit-like">
 // Attribute predicates and classes.
 //===----------------------------------------------------------------------===//
 class DenseArrayMaxCt<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::DenseArrayAttr>().size() <= " # n>,
+    CPred<"::llvm::cast<::mlir::DenseArrayAttr>($_self).size() <= " # n>,
     "with at least " # n # " elements">;
 
 def Tosa_Fp32ArrayAttr2 : ConfinedAttr<DenseF32ArrayAttr, [DenseArrayCount<2>]>;
@@ -194,8 +194,8 @@ def Tosa_IntArrayAttrUpto5 : ConfinedAttr<DenseI64ArrayAttr, [DenseArrayMaxCt<5>
 //===----------------------------------------------------------------------===//
 // Supported regimes for tosa.resize.
 def Tosa_ResizeTypeAttr : StringBasedAttr<
-    CPred<"$_self.cast<StringAttr>().getValue() == \"BILINEAR\"  || " #
-          "$_self.cast<StringAttr>().getValue() == \"NEAREST_NEIGHBOR\"">,
+    CPred<"::llvm::cast<StringAttr>($_self).getValue() == \"BILINEAR\"  || " #
+          "::llvm::cast<StringAttr>($_self).getValue() == \"NEAREST_NEIGHBOR\"">,
     "Supported resize/upsampling strategies">;
 
 def Tosa_TensorTypeAttr : TypeAttrBase<"TensorType", "Tensor type attribute">;

diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformTypes.td b/mlir/include/mlir/Dialect/Transform/IR/TransformTypes.td
index 2a2ecc287392e..d5e7dd03ecb77 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/TransformTypes.td
+++ b/mlir/include/mlir/Dialect/Transform/IR/TransformTypes.td
@@ -63,7 +63,7 @@ def Transform_AnyValue : TypeDef<Transform_Dialect, "AnyValue",
 
 class Transform_ConcreteOpType<string opname>
   : Type<And<[Transform_OperationType.predicate,
-              CPred<"$_self.cast<::mlir::transform::OperationType>()"
+              CPred<"::llvm::cast<::mlir::transform::OperationType>($_self)"
                     ".getOperationName() == \"" # opname # "\"">]>,
          "Transform IR handle to " # opname # " operations",
          "::mlir::transform::OperationType">;

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index dec6addf72cc4..e4909a24dbeea 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -222,10 +222,10 @@ def Vector_ContractionOp :
   ];
   let extraClassDeclaration = [{
     VectorType getLhsType() {
-      return getLhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getLhs().getType());
     }
     VectorType getRhsType() {
-      return getRhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRhs().getType());
     }
     Type getAccType() { return getAcc().getType(); }
     Type getResultType() { return getResult().getType(); }
@@ -301,7 +301,7 @@ def Vector_ReductionOp :
   }];
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
   }];
   let builders = [
@@ -357,7 +357,7 @@ def Vector_MultiDimReductionOp :
     static StringRef getReductionDimsAttrStrName() { return "reduction_dims"; }
 
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     Type getDestType() {
       return getDest().getType();
@@ -429,7 +429,7 @@ def Vector_BroadcastOp :
   let extraClassDeclaration = [{
     Type getSourceType() { return getSource().getType(); }
     VectorType getResultVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
 
     /// Return the dimensions of the result vector that were formerly ones in the
@@ -511,13 +511,13 @@ def Vector_ShuffleOp :
   let extraClassDeclaration = [{
     static StringRef getMaskAttrStrName() { return "mask"; }
     VectorType getV1VectorType() {
-      return getV1().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getV1().getType());
     }
     VectorType getV2VectorType() {
-      return getV2().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getV2().getType());
     }
     VectorType getResultVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
   }];
   let assemblyFormat = "operands $mask attr-dict `:` type(operands)";
@@ -529,7 +529,7 @@ def Vector_ExtractElementOp :
   Vector_Op<"extractelement", [Pure,
      TypesMatchWith<"result type matches element type of vector operand",
                     "vector", "result",
-                    "$_self.cast<VectorType>().getElementType()">]>,
+                    "::llvm::cast<VectorType>($_self).getElementType()">]>,
     Arguments<(ins AnyVectorOfAnyRank:$vector,
                    Optional<AnySignlessIntegerOrIndex>:$position)>,
     Results<(outs AnyType:$result)> {
@@ -564,7 +564,7 @@ def Vector_ExtractElementOp :
   ];
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
   }];
   let hasVerifier = 1;
@@ -599,7 +599,7 @@ def Vector_ExtractOp :
   let extraClassDeclaration = [{
     static StringRef getPositionAttrStrName() { return "position"; }
     VectorType getSourceVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
     static bool isCompatibleReturnTypes(TypeRange l, TypeRange r);
   }];
@@ -636,7 +636,7 @@ def Vector_FMAOp :
   }];
   let assemblyFormat = "$lhs `,` $rhs `,` $acc attr-dict `:` type($lhs)";
   let extraClassDeclaration = [{
-    VectorType getVectorType() { return getLhs().getType().cast<VectorType>(); }
+    VectorType getVectorType() { return ::llvm::cast<VectorType>(getLhs().getType()); }
   }];
 }
 
@@ -644,7 +644,7 @@ def Vector_InsertElementOp :
   Vector_Op<"insertelement", [Pure,
      TypesMatchWith<"source operand type matches element type of result",
                     "result", "source",
-                    "$_self.cast<VectorType>().getElementType()">,
+                    "::llvm::cast<VectorType>($_self).getElementType()">,
      AllTypesMatch<["dest", "result"]>]>,
      Arguments<(ins AnyType:$source, AnyVectorOfAnyRank:$dest,
                     Optional<AnySignlessIntegerOrIndex>:$position)>,
@@ -681,7 +681,7 @@ def Vector_InsertElementOp :
   let extraClassDeclaration = [{
     Type getSourceType() { return getSource().getType(); }
     VectorType getDestVectorType() {
-      return getDest().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getDest().getType());
     }
   }];
   let hasVerifier = 1;
@@ -722,7 +722,7 @@ def Vector_InsertOp :
     static StringRef getPositionAttrStrName() { return "position"; }
     Type getSourceType() { return getSource().getType(); }
     VectorType getDestVectorType() {
-      return getDest().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getDest().getType());
     }
   }];
 
@@ -780,10 +780,10 @@ def Vector_ScalableInsertOp :
 
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getDestVectorType() {
-      return getDest().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getDest().getType());
     }
   }];
 }
@@ -831,10 +831,10 @@ def Vector_ScalableExtractOp :
 
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getResultVectorType() {
-      return getRes().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getRes().getType());
     }
   }];
 }
@@ -881,14 +881,14 @@ def Vector_InsertStridedSliceOp :
     static StringRef getOffsetsAttrStrName() { return "offsets"; }
     static StringRef getStridesAttrStrName() { return "strides"; }
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getDestVectorType() {
-      return getDest().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getDest().getType());
     }
     bool hasNonUnitStrides() {
       return llvm::any_of(getStrides(), [](Attribute attr) {
-        return attr.cast<IntegerAttr>().getInt() != 1;
+        return ::llvm::cast<IntegerAttr>(attr).getInt() != 1;
       });
     }
   }];
@@ -964,7 +964,7 @@ def Vector_OuterProductOp :
   ];
   let extraClassDeclaration = [{
     VectorType getOperandVectorTypeLHS() {
-      return getLhs().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getLhs().getType());
     }
     Type getOperandTypeRHS() {
       return getRhs().getType();
@@ -972,10 +972,10 @@ def Vector_OuterProductOp :
     VectorType getOperandVectorTypeACC() {
       return getAcc().empty()
         ? VectorType()
-        : (*getAcc().begin()).getType().cast<VectorType>();
+        : ::llvm::cast<VectorType>((*getAcc().begin()).getType());
     }
     VectorType getResultVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
     static constexpr StringRef getKindAttrStrName() {
       return "kind";
@@ -1082,10 +1082,10 @@ def Vector_ReshapeOp :
 
   let extraClassDeclaration = [{
     VectorType getInputVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
     VectorType getOutputVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
 
     /// Returns as integer value the number of input shape operands.
@@ -1151,12 +1151,12 @@ def Vector_ExtractStridedSliceOp :
     static StringRef getSizesAttrStrName() { return "sizes"; }
     static StringRef getStridesAttrStrName() { return "strides"; }
     VectorType getSourceVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
     void getOffsets(SmallVectorImpl<int64_t> &results);
     bool hasNonUnitStrides() {
       return llvm::any_of(getStrides(), [](Attribute attr) {
-        return attr.cast<IntegerAttr>().getInt() != 1;
+        return ::llvm::cast<IntegerAttr>(attr).getInt() != 1;
       });
     }
   }];
@@ -1622,11 +1622,11 @@ def Vector_LoadOp : Vector_Op<"load"> {
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
 
     VectorType getVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
 
@@ -1699,11 +1699,11 @@ def Vector_StoreOp : Vector_Op<"store"> {
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
 
     VectorType getVectorType() {
-      return getValueToStore().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getValueToStore().getType());
     }
   }];
 
@@ -1758,16 +1758,16 @@ def Vector_MaskedLoadOp :
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getMaskVectorType() {
-      return getMask().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getMask().getType());
     }
     VectorType getPassThruVectorType() {
-      return getPassThru().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getPassThru().getType());
     }
     VectorType getVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
   let assemblyFormat = "$base `[` $indices `]` `,` $mask `,` $pass_thru attr-dict `:` "
@@ -1820,13 +1820,13 @@ def Vector_MaskedStoreOp :
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getMaskVectorType() {
-      return getMask().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getMask().getType());
     }
     VectorType getVectorType() {
-      return getValueToStore().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getValueToStore().getType());
     }
   }];
   let assemblyFormat =
@@ -2017,16 +2017,16 @@ def Vector_ExpandLoadOp :
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getMaskVectorType() {
-      return getMask().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getMask().getType());
     }
     VectorType getPassThruVectorType() {
-      return getPassThru().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getPassThru().getType());
     }
     VectorType getVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
   let assemblyFormat = "$base `[` $indices `]` `,` $mask `,` $pass_thru attr-dict `:` "
@@ -2080,13 +2080,13 @@ def Vector_CompressStoreOp :
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getBase().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getBase().getType());
     }
     VectorType getMaskVectorType() {
-      return getMask().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getMask().getType());
     }
     VectorType getVectorType() {
-      return getValueToStore().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getValueToStore().getType());
     }
   }];
   let assemblyFormat =
@@ -2136,10 +2136,10 @@ def Vector_ShapeCastOp :
   }];
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getResultVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
@@ -2177,10 +2177,10 @@ def Vector_BitCastOp :
   }];
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getResultVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
   }];
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
@@ -2220,10 +2220,10 @@ def Vector_TypeCastOp :
 
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return getMemref().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getMemref().getType());
     }
     MemRefType getResultMemRefType() {
-      return getResult().getType().cast<MemRefType>();
+      return ::llvm::cast<MemRefType>(getResult().getType());
     }
     // Implement ViewLikeOpInterface.
     Value getViewSource() { return getMemref(); }
@@ -2438,10 +2438,10 @@ def Vector_TransposeOp :
   ];
   let extraClassDeclaration = [{
     VectorType getSourceVectorType() {
-      return getVector().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getVector().getType());
     }
     VectorType getResultVectorType() {
-      return getResult().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getResult().getType());
     }
     void getTransp(SmallVectorImpl<int64_t> &results);
     static StringRef getTranspAttrStrName() { return "transp"; }
@@ -2555,7 +2555,7 @@ def Vector_MatmulOp : Vector_Op<"matrix_multiply", [Pure,
      $_state.addAttribute("lhs_columns",$_builder.getI32IntegerAttr(lhsColumns));
      $_state.addAttribute("rhs_columns",$_builder.getI32IntegerAttr(rhsColumns));
      $_state.addTypes(VectorType::get(lhsRows * rhsColumns,
-       lhs.getType().cast<VectorType>().getElementType()));
+       ::llvm::cast<VectorType>(lhs.getType()).getElementType()));
    }]>,
   ];
   let assemblyFormat = "$lhs `,` $rhs attr-dict "
@@ -2613,7 +2613,7 @@ def Vector_SplatOp : Vector_Op<"splat", [
     Pure,
     TypesMatchWith<"operand type matches element type of result",
                    "aggregate", "input",
-                   "$_self.cast<VectorType>().getElementType()">
+                   "::llvm::cast<VectorType>($_self).getElementType()">
   ]> {
   let summary = "vector splat or broadcast operation";
   let description = [{
@@ -2712,16 +2712,16 @@ def Vector_ScanOp :
     static StringRef getKindAttrStrName() { return "kind"; }
     static StringRef getReductionDimAttrStrName() { return "reduction_dim"; }
     VectorType getSourceType() {
-      return getSource().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getSource().getType());
     }
     VectorType getDestType() {
-      return getDest().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getDest().getType());
     }
     VectorType getAccumulatorType() {
-      return getAccumulatedValue().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getAccumulatedValue().getType());
     }
     VectorType getInitialValueType() {
-      return getInitialValue().getType().cast<VectorType>();
+      return ::llvm::cast<VectorType>(getInitialValue().getType());
     }
   }];
   let assemblyFormat =

diff --git a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
index 483e9236c9e9b..fa3f0ee0460b1 100644
--- a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
+++ b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
@@ -60,7 +60,7 @@ def MaskCompressOp : AVX512_Op<"mask.compress", [Pure,
   AllTypesMatch<["a", "dst"]>,
   TypesMatchWith<"`k` has the same number of bits as elements in `dst`",
                  "dst", "k",
-                 "VectorType::get({$_self.cast<VectorType>().getShape()[0]}, "
+                 "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0]}, "
                  "IntegerType::get($_self.getContext(), 1))">]> {
   let summary = "Masked compress op";
   let description = [{
@@ -95,7 +95,7 @@ def MaskCompressIntrOp : AVX512_IntrOverloadedOp<"mask.compress", [
   AllTypesMatch<["a", "src", "res"]>,
   TypesMatchWith<"`k` has the same number of bits as elements in `res`",
                  "res", "k",
-                 "VectorType::get({$_self.cast<VectorType>().getShape()[0]}, "
+                 "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0]}, "
                  "IntegerType::get($_self.getContext(), 1))">]> {
   let arguments = (ins VectorOfLengthAndType<[16, 8],
                                              [F32, I32, F64, I64]>:$a,
@@ -114,7 +114,7 @@ def MaskRndScaleOp : AVX512_Op<"mask.rndscale", [Pure,
   TypesMatchWith<"imm has the same number of bits as elements in dst",
                  "dst", "imm",
                  "IntegerType::get($_self.getContext(), "
-                 "($_self.cast<VectorType>().getShape()[0]))">]> {
+                 "(::llvm::cast<VectorType>($_self).getShape()[0]))">]> {
   let summary = "Masked roundscale op";
   let description = [{
     The mask.rndscale op is an AVX512 specific op that can lower to the proper
@@ -170,7 +170,7 @@ def MaskScaleFOp : AVX512_Op<"mask.scalef", [Pure,
   TypesMatchWith<"k has the same number of bits as elements in dst",
                  "dst", "k",
                  "IntegerType::get($_self.getContext(), "
-                 "($_self.cast<VectorType>().getShape()[0]))">]> {
+                 "(::llvm::cast<VectorType>($_self).getShape()[0]))">]> {
   let summary = "ScaleF op";
   let description = [{
     The `mask.scalef` op is an AVX512 specific op that can lower to the proper
@@ -226,13 +226,13 @@ def Vp2IntersectOp : AVX512_Op<"vp2intersect", [Pure,
   AllTypesMatch<["a", "b"]>,
   TypesMatchWith<"k1 has the same number of bits as elements in a",
                  "a", "k1",
-                 "VectorType::get({$_self.cast<VectorType>().getShape()[0]}, "
+                 "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0]}, "
                  "IntegerType::get($_self.getContext(), 1))">,
   TypesMatchWith<"k2 has the same number of bits as elements in b",
                  // Should use `b` instead of `a`, but that would require
                  // adding `type($b)` to assemblyFormat.
                  "a", "k2",
-                 "VectorType::get({$_self.cast<VectorType>().getShape()[0]}, "
+                 "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0]}, "
                  "IntegerType::get($_self.getContext(), 1))">]> {
   let summary = "Vp2Intersect op";
   let description = [{

diff --git a/mlir/include/mlir/IR/AttrTypeBase.td b/mlir/include/mlir/IR/AttrTypeBase.td
index e9c25eef7f477..996ad71502d3f 100644
--- a/mlir/include/mlir/IR/AttrTypeBase.td
+++ b/mlir/include/mlir/IR/AttrTypeBase.td
@@ -254,12 +254,12 @@ class AttrDef<Dialect dialect, string name, list<Trait> traits = [],
   //
   // For example, `$_self.getValue().getSExtValue()` for `IntegerAttr val` will
   // expand to `getAttrOfType<IntegerAttr>("val").getValue().getSExtValue()`.
-  let convertFromStorage = "$_self.cast<" # dialect.cppNamespace #
-                                 "::" # cppClassName # ">()";
+  let convertFromStorage = "::llvm::cast<" # dialect.cppNamespace #
+                                 "::" # cppClassName # ">($_self)";
 
   // The predicate for when this def is used as a constraint.
-  let predicate = CPred<"$_self.isa<" # dialect.cppNamespace #
-                                 "::" # cppClassName # ">()">;
+  let predicate = CPred<"::llvm::isa<" # dialect.cppNamespace #
+                                 "::" # cppClassName # ">($_self)">;
 }
 
 // Define a new type, named `name`, belonging to `dialect` that inherits from
@@ -277,8 +277,8 @@ class TypeDef<Dialect dialect, string name, list<Trait> traits = [],
                                "::" # cppClassName # ">()",
                            "");
   // The predicate for when this def is used as a constraint.
-  let predicate = CPred<"$_self.isa<" # dialect.cppNamespace #
-                                 "::" # cppClassName # ">()">;
+  let predicate = CPred<"::llvm::isa<" # dialect.cppNamespace #
+                                 "::" # cppClassName # ">($_self)">;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/mlir/include/mlir/IR/BuiltinAttributeInterfaces.td b/mlir/include/mlir/IR/BuiltinAttributeInterfaces.td
index 39c871d7cab15..c741db9b47f34 100644
--- a/mlir/include/mlir/IR/BuiltinAttributeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinAttributeInterfaces.td
@@ -397,7 +397,7 @@ def ElementsAttrInterface : AttrInterface<"ElementsAttr", [TypedAttrInterface]>
       detail::ElementsAttrRange<DerivedAttrValueIterator<T>>;
     template <typename T, typename = DerivedAttrValueCheckT<T>>
     DerivedAttrValueIteratorRange<T> getValues() const {
-      auto castFn = [](Attribute attr) { return attr.template cast<T>(); };
+      auto castFn = [](Attribute attr) { return ::llvm::cast<T>(attr); };
       return {getShapedType(), llvm::map_range(getValues<Attribute>(),
               static_cast<T (*)(Attribute)>(castFn))};
     }
@@ -432,7 +432,7 @@ def ElementsAttrInterface : AttrInterface<"ElementsAttr", [TypedAttrInterface]>
       if (!values)
         return std::nullopt;
 
-      auto castFn = [](Attribute attr) { return attr.template cast<T>(); };
+      auto castFn = [](Attribute attr) { return ::llvm::cast<T>(attr); };
       return DerivedAttrValueIteratorRange<T>(
         getShapedType(),
         llvm::map_range(*values, static_cast<T (*)(Attribute)>(castFn))

diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td
index 01ab1a765aae0..3273485d729f1 100644
--- a/mlir/include/mlir/IR/BuiltinAttributes.td
+++ b/mlir/include/mlir/IR/BuiltinAttributes.td
@@ -116,8 +116,8 @@ def Builtin_ArrayAttr : Builtin_Attr<"Array"> {
     public:
       explicit attr_value_iterator(ArrayAttr::iterator it)
           : llvm::mapped_iterator<ArrayAttr::iterator, AttrTy (*)(Attribute)>(
-                it, [](Attribute attr) { return attr.cast<AttrTy>(); }) {}
-      AttrTy operator*() const { return (*this->I).template cast<AttrTy>(); }
+                it, [](Attribute attr) { return ::llvm::cast<AttrTy>(attr); }) {}
+      AttrTy operator*() const { return ::llvm::cast<AttrTy>(*this->I); }
     };
 
   public:
@@ -622,13 +622,13 @@ def Builtin_FloatAttr : Builtin_Attr<"Float", [TypedAttrInterface]> {
       return $_get(type.getContext(), type, value);
     }]>,
     AttrBuilderWithInferredContext<(ins "Type":$type, "double":$value), [{
-      if (type.isF64() || !type.isa<FloatType>())
+      if (type.isF64() || !::llvm::isa<FloatType>(type))
         return $_get(type.getContext(), type, APFloat(value));
 
       // This handles, e.g., F16 because there is no APFloat constructor for it.
       bool unused;
       APFloat val(value);
-      val.convert(type.cast<FloatType>().getFloatSemantics(),
+      val.convert(::llvm::cast<FloatType>(type).getFloatSemantics(),
                   APFloat::rmNearestTiesToEven, &unused);
       return $_get(type.getContext(), type, val);
     }]>
@@ -695,7 +695,7 @@ def Builtin_IntegerAttr : Builtin_Attr<"Integer", [TypedAttrInterface]> {
         return $_get(type.getContext(), type, apValue);
       }
 
-      IntegerType intTy = type.cast<IntegerType>();
+      IntegerType intTy = ::llvm::cast<IntegerType>(type);
       APInt apValue(intTy.getWidth(), value, intTy.isSignedInteger());
       return $_get(type.getContext(), type, apValue);
     }]>
@@ -848,11 +848,11 @@ def Builtin_SparseElementsAttr : Builtin_Attr<
                                         "DenseElementsAttr":$values), [{
       assert(indices.getType().getElementType().isInteger(64) &&
              "expected sparse indices to be 64-bit integer values");
-      assert((type.isa<RankedTensorType, VectorType>()) &&
+      assert((::llvm::isa<RankedTensorType, VectorType>(type)) &&
              "type must be ranked tensor or vector");
       assert(type.hasStaticShape() && "type must have static shape");
       return $_get(type.getContext(), type,
-                   indices.cast<DenseIntElementsAttr>(), values);
+                   ::llvm::cast<DenseIntElementsAttr>(indices), values);
     }]>,
   ];
   let extraClassDeclaration = [{
@@ -905,7 +905,7 @@ def Builtin_SparseElementsAttr : Builtin_Attr<
     template <typename T>
     std::enable_if_t<std::is_base_of<Attribute, T>::value, T>
     getZeroValue() const {
-      return getZeroAttr().template cast<T>();
+      return ::llvm::cast<T>(getZeroAttr());
     }
     /// Get a zero for an APInt.
     template <typename T>

diff --git a/mlir/include/mlir/IR/BuiltinDialectBytecode.td b/mlir/include/mlir/IR/BuiltinDialectBytecode.td
index 29addb43e77ed..a2aab3ad96a75 100644
--- a/mlir/include/mlir/IR/BuiltinDialectBytecode.td
+++ b/mlir/include/mlir/IR/BuiltinDialectBytecode.td
@@ -51,7 +51,7 @@ let cType = "StringAttr" in {
 def StringAttr : DialectAttribute<(attr
   String:$value
 )> {
-  let printerPredicate = "$_val.getType().isa<NoneType>()";
+  let printerPredicate = "::llvm::isa<NoneType>($_val.getType())";
 }
 
 //  ///   StringAttrWithType {
@@ -63,7 +63,7 @@ def StringAttr : DialectAttribute<(attr
 def StringAttrWithType : DialectAttribute<(attr
   String:$value,
   Type:$type
-)> { let printerPredicate = "!$_val.getType().isa<NoneType>()"; }
+)> { let printerPredicate = "!::llvm::isa<NoneType>($_val.getType())"; }
 }
 
 //  ///   DictionaryAttr {
@@ -321,7 +321,7 @@ def IntegerType : DialectType<(type
     "static_cast<IntegerType::SignednessSemantics>(_widthAndSignedness & 0x3)">:$signedness
 )>;
 
-// 
+//
 //   ///   IndexType {
 //   ///   }
 //   ///

diff  --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
index c703d6a79c561..218c240743ae6 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -1072,7 +1072,7 @@ def Builtin_Vector : Builtin_Type<"Vector", [ShapedTypeInterface], "Type"> {
     /// type. In particular, vectors can consist of integer, index, or float
     /// primitives.
     static bool isValidElementType(Type t) {
-      return t.isa<IntegerType, IndexType, FloatType>();
+      return ::llvm::isa<IntegerType, IndexType, FloatType>(t);
     }
 
     /// Returns true if the vector contains scalable dimensions.

diff  --git a/mlir/include/mlir/IR/EnumAttr.td b/mlir/include/mlir/IR/EnumAttr.td
index 14fbfb9f0997f..64a2ea13b557f 100644
--- a/mlir/include/mlir/IR/EnumAttr.td
+++ b/mlir/include/mlir/IR/EnumAttr.td
@@ -34,7 +34,7 @@ class IntEnumAttrCaseBase<I intType, string sym, string strVal, int intVal> :
     EnumAttrCaseInfo<sym, intVal, strVal>,
     SignlessIntegerAttrBase<intType, "case " # strVal> {
   let predicate =
-    CPred<"$_self.cast<::mlir::IntegerAttr>().getInt() == " # intVal>;
+    CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getInt() == " # intVal>;
 }
 
 // Cases of integer enum attributes with a specific type. By default, the string
@@ -250,7 +250,7 @@ class BitEnumAttrBase<I intType, list<BitEnumAttrCaseBase> cases,
   let predicate = And<[
     SignlessIntegerAttrBase<intType, summary>.predicate,
     // Make sure we don't have unknown bit set.
-    CPred<"!($_self.cast<::mlir::IntegerAttr>().getValue().getZExtValue() & (~("
+    CPred<"!(::llvm::cast<::mlir::IntegerAttr>($_self).getValue().getZExtValue() & (~("
           # !interleave(!foreach(case, cases, case.value # "u"), "|") #
           ")))">
   ]>;

diff  --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td
index b02c0eafa0c24..2dd62c114b07b 100644
--- a/mlir/include/mlir/IR/OpBase.td
+++ b/mlir/include/mlir/IR/OpBase.td
@@ -72,7 +72,7 @@ class Pred;
 // * `$_op` will be replaced by the current operation.
 // * `$_self` will be replaced with the entity this predicate is attached to.
 //   E.g., `BoolAttr` is an attribute constraint that wraps a
-//   `CPred<"$_self.isa<BoolAttr>()">` (see the following sections for details).
+//   `CPred<"::llvm::isa<BoolAttr>($_self)">` (see the following sections for details).
 //   Then for `F32:$attr`,`$_self` will be replaced by `$attr`.
 //   For type constraints, it's a little bit special since we want the
 //   constraints on each type definition reads naturally and we want to attach
@@ -274,52 +274,52 @@ class SuccessorConstraint<Pred predicate, string summary = ""> :
 
 // Whether a type is a VectorType.
 // Explicitly disallow 0-D vectors for now until we have good enough coverage.
-def IsVectorTypePred : And<[CPred<"$_self.isa<::mlir::VectorType>()">,
-                            CPred<"$_self.cast<::mlir::VectorType>().getRank() > 0">]>;
+def IsVectorTypePred : And<[CPred<"::llvm::isa<::mlir::VectorType>($_self)">,
+                            CPred<"::llvm::cast<::mlir::VectorType>($_self).getRank() > 0">]>;
 
 // Temporary vector type clone that allows gradual transition to 0-D vectors.
 // TODO: Remove this when all ops support 0-D vectors.
-def IsVectorOfAnyRankTypePred : CPred<"$_self.isa<::mlir::VectorType>()">;
+def IsVectorOfAnyRankTypePred : CPred<"::llvm::isa<::mlir::VectorType>($_self)">;
 
 // Whether a type is a fixed-length VectorType.
-def IsFixedVectorTypePred : CPred<[{$_self.isa<::mlir::VectorType>() &&
-                                  !$_self.cast<VectorType>().isScalable()}]>;
+def IsFixedVectorTypePred : CPred<[{::llvm::isa<::mlir::VectorType>($_self) &&
+                                  !::llvm::cast<VectorType>($_self).isScalable()}]>;
 
 // Whether a type is a scalable VectorType.
-def IsScalableVectorTypePred : CPred<[{$_self.isa<::mlir::VectorType>() &&
-                                   $_self.cast<VectorType>().isScalable()}]>;
+def IsScalableVectorTypePred : CPred<[{::llvm::isa<::mlir::VectorType>($_self) &&
+                                   ::llvm::cast<VectorType>($_self).isScalable()}]>;
 
 // Whether a type is a TensorType.
-def IsTensorTypePred : CPred<"$_self.isa<::mlir::TensorType>()">;
+def IsTensorTypePred : CPred<"::llvm::isa<::mlir::TensorType>($_self)">;
 
 // Whether a type is a MemRefType.
-def IsMemRefTypePred : CPred<"$_self.isa<::mlir::MemRefType>()">;
+def IsMemRefTypePred : CPred<"::llvm::isa<::mlir::MemRefType>($_self)">;
 
 // Whether a type is an UnrankedMemRefType
 def IsUnrankedMemRefTypePred
-        : CPred<"$_self.isa<::mlir::UnrankedMemRefType>()">;
+        : CPred<"::llvm::isa<::mlir::UnrankedMemRefType>($_self)">;
 
 // Whether a type is an UnrankedTensorType
 def IsUnrankedTensorTypePred
-        : CPred<"$_self.isa<::mlir::UnrankedTensorType>()">;
+        : CPred<"::llvm::isa<::mlir::UnrankedTensorType>($_self)">;
 
 // Whether a type is a RankedTensorType
 def IsRankedTensorTypePred
-        : CPred<"$_self.isa<::mlir::RankedTensorType>()">;
+        : CPred<"::llvm::isa<::mlir::RankedTensorType>($_self)">;
 
 // Whether a type is a BaseMemRefType
 def IsBaseMemRefTypePred
-        : CPred<"$_self.isa<::mlir::BaseMemRefType>()">;
+        : CPred<"::llvm::isa<::mlir::BaseMemRefType>($_self)">;
 
 // Whether a type is a ShapedType.
-def IsShapedTypePred : CPred<"$_self.isa<::mlir::ShapedType>()">;
+def IsShapedTypePred : CPred<"::llvm::isa<::mlir::ShapedType>($_self)">;
 
 // For a ShapedType, verify that it has a static shape.
 def HasStaticShapePred :
-        CPred<"$_self.cast<::mlir::ShapedType>().hasStaticShape()">;
+        CPred<"::llvm::cast<::mlir::ShapedType>($_self).hasStaticShape()">;
 
 // Whether a type is a TupleType.
-def IsTupleTypePred : CPred<"$_self.isa<::mlir::TupleType>()">;
+def IsTupleTypePred : CPred<"::llvm::isa<::mlir::TupleType>($_self)">;
 
 //===----------------------------------------------------------------------===//
 // Type definitions
@@ -394,7 +394,7 @@ class SameBuildabilityAs<Type type, code builder> {
 def AnyType : Type<CPred<"true">, "any type">;
 
 // None type
-def NoneType : Type<CPred<"$_self.isa<::mlir::NoneType>()">, "none type",
+def NoneType : Type<CPred<"::llvm::isa<::mlir::NoneType>($_self)">, "none type",
                     "::mlir::NoneType">,
       BuildableType<"$_builder.getType<::mlir::NoneType>()">;
 
@@ -427,7 +427,7 @@ class ConfinedType<Type type, list<Pred> predicates, string summary = "",
 // Integer types.
 
 // Any integer type irrespective of its width and signedness semantics.
-def AnyInteger : Type<CPred<"$_self.isa<::mlir::IntegerType>()">, "integer",
+def AnyInteger : Type<CPred<"::llvm::isa<::mlir::IntegerType>($_self)">, "integer",
                       "::mlir::IntegerType">;
 
 // Any integer type (regardless of signedness semantics) of a specific width.
@@ -517,7 +517,7 @@ def UI32 : UI<32>;
 def UI64 : UI<64>;
 
 // Index type.
-def Index : Type<CPred<"$_self.isa<::mlir::IndexType>()">, "index",
+def Index : Type<CPred<"::llvm::isa<::mlir::IndexType>($_self)">, "index",
                  "::mlir::IndexType">,
             BuildableType<"$_builder.getIndexType()">;
 
@@ -528,7 +528,7 @@ def AnySignlessIntegerOrIndex : Type<CPred<"$_self.isSignlessIntOrIndex()">,
 // Floating point types.
 
 // Any float type irrespective of its width.
-def AnyFloat : Type<CPred<"$_self.isa<::mlir::FloatType>()">, "floating-point",
+def AnyFloat : Type<CPred<"::llvm::isa<::mlir::FloatType>($_self)">, "floating-point",
                     "::mlir::FloatType">;
 
 // Float type of a specific width.
@@ -562,13 +562,13 @@ def F8E4M3B11FNUZ : Type<CPred<"$_self.isFloat8E4M3B11FNUZ()">, "f8E4M3B11FNUZ t
 def F8E5M2FNUZ : Type<CPred<"$_self.isFloat8E5M2FNUZ()">, "f8E5M2FNUZ type">,
                  BuildableType<"$_builder.getFloat8E5M2FNUZType()">;
 
-def AnyComplex : Type<CPred<"$_self.isa<::mlir::ComplexType>()">,
+def AnyComplex : Type<CPred<"::llvm::isa<::mlir::ComplexType>($_self)">,
                       "complex-type", "::mlir::ComplexType">;
 
 class Complex<Type type>
     : ConfinedType<AnyComplex, [
           SubstLeaves<"$_self",
-                      "$_self.cast<::mlir::ComplexType>().getElementType()",
+                      "::llvm::cast<::mlir::ComplexType>($_self).getElementType()",
            type.predicate>],
            "complex type with " # type.summary # " elements",
            "::mlir::ComplexType">,
@@ -587,7 +587,7 @@ class OpaqueType<string dialect, string name, string summary>
 // Function Type
 
 // Any function type.
-def FunctionType : Type<CPred<"$_self.isa<::mlir::FunctionType>()">,
+def FunctionType : Type<CPred<"::llvm::isa<::mlir::FunctionType>($_self)">,
                               "function type", "::mlir::FunctionType">;
 
 // A container type is a type that has another type embedded within it.
@@ -607,24 +607,24 @@ class ShapedContainerType<list<Type> allowedTypes,
               Concat<"[](::mlir::Type elementType) { return ",
                 SubstLeaves<"$_self", "elementType",
                 AnyTypeOf<allowedTypes>.predicate>,
-                "; }($_self.cast<::mlir::ShapedType>().getElementType())">]>,
+                "; }(::llvm::cast<::mlir::ShapedType>($_self).getElementType())">]>,
          descr # " of " # AnyTypeOf<allowedTypes>.summary # " values", cppClassName>;
 
 // Whether a shaped type is ranked.
-def HasRankPred : CPred<"$_self.cast<::mlir::ShapedType>().hasRank()">;
+def HasRankPred : CPred<"::llvm::cast<::mlir::ShapedType>($_self).hasRank()">;
 
 // Whether a shaped type has one of the specified ranks.
 class HasAnyRankOfPred<list<int> ranks> : And<[
     HasRankPred,
     Or<!foreach(rank, ranks,
-                CPred<[{$_self.cast<::mlir::ShapedType>().getRank()
+                CPred<[{::llvm::cast<::mlir::ShapedType>($_self).getRank()
                          == }]
                       # rank>)>]>;
 
 // Whether a shaped type has a rank greater than or equal of the specified rank.
 class HasRankGreaterOrEqualPred<int rank> : And<[
     HasRankPred,
-    CPred<[{$_self.cast<::mlir::ShapedType>().getRank() >= }] # rank>
+    CPred<[{::llvm::cast<::mlir::ShapedType>($_self).getRank() >= }] # rank>
 ]>;
 
 // Vector types.
@@ -652,7 +652,7 @@ class ScalableVectorOf<list<Type> allowedTypes> :
 class IsVectorOfRankPred<list<int> allowedRanks> :
   And<[IsVectorTypePred,
        Or<!foreach(allowedlength, allowedRanks,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getRank()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getRank()
                            == }]
                          # allowedlength>)>]>;
 
@@ -661,7 +661,7 @@ class IsVectorOfRankPred<list<int> allowedRanks> :
 class IsFixedVectorOfRankPred<list<int> allowedRanks> :
   And<[IsFixedVectorTypePred,
        Or<!foreach(allowedlength, allowedRanks,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getRank()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getRank()
                            == }]
                          # allowedlength>)>]>;
 
@@ -670,7 +670,7 @@ class IsFixedVectorOfRankPred<list<int> allowedRanks> :
 class IsScalableVectorOfRankPred<list<int> allowedRanks> :
   And<[IsScalableVectorTypePred,
        Or<!foreach(allowedlength, allowedRanks,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getRank()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getRank()
                            == }]
                          # allowedlength>)>]>;
 
@@ -702,7 +702,7 @@ class VectorOfRankAndType<list<int> allowedRanks,
 class IsVectorOfLengthPred<list<int> allowedLengths> :
   And<[IsVectorTypePred,
        Or<!foreach(allowedlength, allowedLengths,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getNumElements()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getNumElements()
                            == }]
                          # allowedlength>)>]>;
 
@@ -711,7 +711,7 @@ class IsVectorOfLengthPred<list<int> allowedLengths> :
 class IsFixedVectorOfLengthPred<list<int> allowedLengths> :
   And<[IsFixedVectorTypePred,
        Or<!foreach(allowedlength, allowedLengths,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getNumElements()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getNumElements()
                            == }]
                          # allowedlength>)>]>;
 
@@ -720,7 +720,7 @@ class IsFixedVectorOfLengthPred<list<int> allowedLengths> :
 class IsScalableVectorOfLengthPred<list<int> allowedLengths> :
   And<[IsScalableVectorTypePred,
        Or<!foreach(allowedlength, allowedLengths,
-                   CPred<[{$_self.cast<::mlir::VectorType>().getNumElements()
+                   CPred<[{::llvm::cast<::mlir::VectorType>($_self).getNumElements()
                            == }]
                          # allowedlength>)>]>;
 
@@ -928,7 +928,7 @@ class StaticShapeMemRefOf<list<Type> allowedTypes> :
 def AnyStaticShapeMemRef : StaticShapeMemRefOf<[AnyType]>;
 
 // For a MemRefType, verify that it has strides.
-def HasStridesPred : CPred<[{ isStrided($_self.cast<::mlir::MemRefType>()) }]>;
+def HasStridesPred : CPred<[{ isStrided(::llvm::cast<::mlir::MemRefType>($_self)) }]>;
 
 class StridedMemRefOf<list<Type> allowedTypes> :
     ConfinedType<MemRefOf<allowedTypes>, [HasStridesPred],
@@ -974,14 +974,14 @@ class MixedContainerType<Type etype, Pred containerPred, code elementTypesCall,
 // A Tuple that holds a mix of elements of the allowed types.
 class TupleOf<list<Type> allowedTypes>
     : MixedContainerType<AnyTypeOf<allowedTypes>, IsTupleTypePred,
-                         "$_self.cast<::mlir::TupleType>().getTypes()",
+                         "::llvm::cast<::mlir::TupleType>($_self).getTypes()",
                          "tuple">;
 
 // A Tuple with arbitrary nesting, where all elements are a mix of the allowed
 // types.
 class NestedTupleOf<list<Type> allowedTypes> :
     MixedContainerType<AnyTypeOf<allowedTypes>, IsTupleTypePred,
-                       "getFlattenedTypes($_self.cast<::mlir::TupleType>())",
+                       "getFlattenedTypes(::llvm::cast<::mlir::TupleType>($_self))",
                        "nested tuple">;
 
 //===----------------------------------------------------------------------===//
@@ -1198,10 +1198,10 @@ class AnyAttrOf<list<Attr> allowedAttrs, string summary = "",
     let convertFromStorage = fromStorage;
 }
 
-def LocationAttr : Attr<CPred<"$_self.isa<::mlir::LocationAttr>()">,
+def LocationAttr : Attr<CPred<"::llvm::isa<::mlir::LocationAttr>($_self)">,
                         "location attribute">;
 
-def BoolAttr : Attr<CPred<"$_self.isa<::mlir::BoolAttr>()">, "bool attribute"> {
+def BoolAttr : Attr<CPred<"::llvm::isa<::mlir::BoolAttr>($_self)">, "bool attribute"> {
   let storageType = [{ ::mlir::BoolAttr }];
   let returnType = [{ bool }];
   let valueType = I1;
@@ -1212,9 +1212,8 @@ def BoolAttr : Attr<CPred<"$_self.isa<::mlir::BoolAttr>()">, "bool attribute"> {
 def IndexAttr :
     TypedAttrBase<
       Index, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()"
-                 ".isa<::mlir::IndexType>()">]>,
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::isa<::mlir::IndexType>(::llvm::cast<::mlir::IntegerAttr>($_self).getType())">]>,
       "index attribute"> {
   let returnType = [{ ::llvm::APInt }];
 }
@@ -1224,8 +1223,8 @@ def IndexAttr :
 class AnyIntegerAttrBase<AnyI attrValType, string descr> :
     TypedAttrBase<
       attrValType, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()."
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getType()."
                  "isInteger(" # attrValType.bitwidth # ")">]>,
       descr> {
   let returnType = [{ ::llvm::APInt }];
@@ -1238,7 +1237,7 @@ def AnyI16Attr : AnyIntegerAttrBase<AnyI16, "16-bit integer attribute">;
 def AnyI32Attr : AnyIntegerAttrBase<AnyI32, "32-bit integer attribute">;
 def AnyI64Attr : AnyIntegerAttrBase<AnyI64, "64-bit integer attribute">;
 
-def APIntAttr : Attr<CPred<"$_self.isa<::mlir::IntegerAttr>()">,
+def APIntAttr : Attr<CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
                      "arbitrary integer attribute"> {
   let storageType = [{ ::mlir::IntegerAttr }];
   let returnType = [{ ::mlir::APInt }];
@@ -1248,8 +1247,8 @@ def APIntAttr : Attr<CPred<"$_self.isa<::mlir::IntegerAttr>()">,
 class SignlessIntegerAttrBase<I attrValType, string descr> :
     TypedAttrBase<
       attrValType, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()."
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getType()."
                  "isSignlessInteger(" # attrValType.bitwidth # ")">]>,
       descr> {
   let returnType = [{ ::llvm::APInt }];
@@ -1277,8 +1276,8 @@ def I64Attr : TypedSignlessIntegerAttrBase<
 class SignedIntegerAttrBase<SI attrValType, string descr> :
     TypedAttrBase<
       attrValType, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()."
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getType()."
                  "isSignedInteger(" # attrValType.bitwidth # ")">]>,
       descr> {
   let returnType = [{ ::llvm::APInt }];
@@ -1306,8 +1305,8 @@ def SI64Attr : TypedSignedIntegerAttrBase<
 class UnsignedIntegerAttrBase<UI attrValType, string descr> :
     TypedAttrBase<
       attrValType, "IntegerAttr",
-      And<[CPred<"$_self.isa<::mlir::IntegerAttr>()">,
-           CPred<"$_self.cast<::mlir::IntegerAttr>().getType()."
+      And<[CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)">,
+           CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getType()."
                  "isUnsignedInteger(" # attrValType.bitwidth # ")">]>,
       descr> {
   let returnType = [{ ::llvm::APInt }];
@@ -1334,8 +1333,8 @@ def UI64Attr : TypedUnsignedIntegerAttrBase<
 // Base class for float attributes of fixed width.
 class FloatAttrBase<F attrValType, string descr> :
     TypedAttrBase<attrValType, "FloatAttr",
-              And<[CPred<"$_self.isa<::mlir::FloatAttr>()">,
-                     CPred<"$_self.cast<::mlir::FloatAttr>().getType().isF" #
+              And<[CPred<"::llvm::isa<::mlir::FloatAttr>($_self)">,
+                     CPred<"::llvm::cast<::mlir::FloatAttr>($_self).getType().isF" #
                            attrValType.bitwidth # "()">]>,
               descr> {
   let returnType = [{ ::llvm::APFloat }];
@@ -1352,16 +1351,16 @@ class StringBasedAttr<Pred condition, string descr> : Attr<condition, descr> {
   let valueType = NoneType;
 }
 
-def StrAttr : StringBasedAttr<CPred<"$_self.isa<::mlir::StringAttr>()">,
+def StrAttr : StringBasedAttr<CPred<"::llvm::isa<::mlir::StringAttr>($_self)">,
                               "string attribute">;
 
 // A string attribute that represents the name of a symbol.
-def SymbolNameAttr : StringBasedAttr<CPred<"$_self.isa<::mlir::StringAttr>()">,
+def SymbolNameAttr : StringBasedAttr<CPred<"::llvm::isa<::mlir::StringAttr>($_self)">,
                                      "string attribute">;
 
 // String attribute that has a specific value type.
 class TypedStrAttr<Type ty>
-    : StringBasedAttr<CPred<"$_self.isa<::mlir::StringAttr>()">,
+    : StringBasedAttr<CPred<"::llvm::isa<::mlir::StringAttr>($_self)">,
                             "string attribute"> {
   let valueType = ty;
 }
@@ -1372,16 +1371,15 @@ class TypedStrAttr<Type ty>
 class TypeAttrBase<string retType, string summary,
                         Pred typePred = CPred<"true">> :
     Attr<And<[
-      CPred<"$_self.isa<::mlir::TypeAttr>()">,
-      CPred<"$_self.cast<::mlir::TypeAttr>().getValue().isa<"
-            # retType # ">()">,
+      CPred<"::llvm::isa<::mlir::TypeAttr>($_self)">,
+      CPred<"::llvm::isa<" # retType # ">(::llvm::cast<::mlir::TypeAttr>($_self).getValue())">,
       SubstLeaves<"$_self",
-                    "$_self.cast<::mlir::TypeAttr>().getValue()", typePred>]>,
+                    "::llvm::cast<::mlir::TypeAttr>($_self).getValue()", typePred>]>,
     summary> {
   let storageType = [{ ::mlir::TypeAttr }];
   let returnType = retType;
   let valueType = NoneType;
-  let convertFromStorage = "$_self.getValue().cast<" # retType # ">()";
+  let convertFromStorage = "::llvm::cast<" # retType # ">($_self.getValue())";
 }
 
 def TypeAttr : TypeAttrBase<"::mlir::Type", "any type attribute"> {
@@ -1397,7 +1395,7 @@ class TypeAttrOf<Type ty>
 // The mere presence of unit attributes has a meaning.  Therefore, unit
 // attributes are always treated as optional and accessors to them return
 // "true" if the attribute is present and "false" otherwise.
-def UnitAttr : Attr<CPred<"$_self.isa<::mlir::UnitAttr>()">, "unit attribute"> {
+def UnitAttr : Attr<CPred<"::llvm::isa<::mlir::UnitAttr>($_self)">, "unit attribute"> {
   let storageType = [{ ::mlir::UnitAttr }];
   let constBuilderCall = "(($0) ? $_builder.getUnitAttr() : nullptr)";
   let convertFromStorage = "$_self != nullptr";
@@ -1420,7 +1418,7 @@ class DictionaryAttrBase<Pred condition, string summary> :
 }
 
 def DictionaryAttr
-    : DictionaryAttrBase<CPred<"$_self.isa<::mlir::DictionaryAttr>()">,
+    : DictionaryAttrBase<CPred<"::llvm::isa<::mlir::DictionaryAttr>($_self)">,
                                "dictionary of named attribute values">;
 
 class ElementsAttrBase<Pred condition, string summary> :
@@ -1430,11 +1428,11 @@ class ElementsAttrBase<Pred condition, string summary> :
   let convertFromStorage = "$_self";
 }
 
-def ElementsAttr : ElementsAttrBase<CPred<"$_self.isa<::mlir::ElementsAttr>()">,
+def ElementsAttr : ElementsAttrBase<CPred<"::llvm::isa<::mlir::ElementsAttr>($_self)">,
                                     "constant vector/tensor attribute">;
 
 class IntElementsAttrBase<Pred condition, string summary> :
-    ElementsAttrBase<And<[CPred<"$_self.isa<::mlir::DenseIntElementsAttr>()">,
+    ElementsAttrBase<And<[CPred<"::llvm::isa<::mlir::DenseIntElementsAttr>($_self)">,
                           condition]>,
                      summary> {
   let storageType = [{ ::mlir::DenseIntElementsAttr }];
@@ -1444,7 +1442,7 @@ class IntElementsAttrBase<Pred condition, string summary> :
 }
 
 class DenseArrayAttrBase<string denseAttrName, string cppType, string summaryName> :
-    ElementsAttrBase<CPred<"$_self.isa<::mlir::" # denseAttrName # ">()">,
+    ElementsAttrBase<CPred<"::llvm::isa<::mlir::" # denseAttrName # ">($_self)">,
                      summaryName # " dense array attribute"> {
   let storageType = "::mlir::" # denseAttrName;
   let returnType = "::llvm::ArrayRef<" # cppType # ">";
@@ -1459,7 +1457,7 @@ def DenseF32ArrayAttr : DenseArrayAttrBase<"DenseF32ArrayAttr", "float", "f32">;
 def DenseF64ArrayAttr : DenseArrayAttrBase<"DenseF64ArrayAttr", "double", "f64">;
 
 def IndexElementsAttr
-    : IntElementsAttrBase<CPred<[{$_self.cast<::mlir::DenseIntElementsAttr>()
+    : IntElementsAttrBase<CPred<[{::llvm::cast<::mlir::DenseIntElementsAttr>($_self)
                                       .getType()
                                       .getElementType()
                                       .isIndex()}]>,
@@ -1468,7 +1466,7 @@ def IndexElementsAttr
 def AnyIntElementsAttr : IntElementsAttrBase<CPred<"true">, "integer elements attribute">;
 
 class IntElementsAttrOf<int width> : IntElementsAttrBase<
-  CPred<"$_self.cast<::mlir::DenseIntElementsAttr>().getType()."
+  CPred<"::llvm::cast<::mlir::DenseIntElementsAttr>($_self).getType()."
         "getElementType().isInteger(" # width # ")">,
   width # "-bit integer elements attribute">;
 
@@ -1476,15 +1474,15 @@ def AnyI32ElementsAttr : IntElementsAttrOf<32>;
 def AnyI64ElementsAttr : IntElementsAttrOf<64>;
 
 class SignlessIntElementsAttr<int width> : IntElementsAttrBase<
-  CPred<"$_self.cast<::mlir::DenseIntElementsAttr>().getType()."
+  CPred<"::llvm::cast<::mlir::DenseIntElementsAttr>($_self).getType()."
         "getElementType().isSignlessInteger(" # width # ")">,
   width # "-bit signless integer elements attribute"> {
 
   // Note that this is only constructing scalar elements attribute.
-  let constBuilderCall = "::mlir::DenseElementsAttr::get("
-    "::mlir::RankedTensorType::get({}, "
-                                  "$_builder.getIntegerType(" # width # ")), "
-    "::llvm::ArrayRef($0)).cast<::mlir::DenseIntElementsAttr>()";
+  let constBuilderCall = "::llvm::cast<::mlir::DenseIntElementsAttr>("
+  "::mlir::DenseElementsAttr::get("
+    "::mlir::RankedTensorType::get({}, $_builder.getIntegerType(" # width # ")), "
+    "::llvm::ArrayRef($0)))";
 }
 
 def I32ElementsAttr : SignlessIntElementsAttr<32>;
@@ -1497,7 +1495,7 @@ class RankedSignlessIntElementsAttr<int width, list<int> dims> :
   // Check that this has the specified shape.
   let predicate = And<[
     SignlessIntElementsAttr<width>.predicate,
-    CPred<"$_self.cast<::mlir::DenseIntElementsAttr>().getType().getShape() == "
+    CPred<"::llvm::cast<::mlir::DenseIntElementsAttr>($_self).getType().getShape() == "
         "::mlir::ArrayRef<int64_t>({" # !interleave(dims, ", ") # "})">]>;
 
   let summary = width # "-bit signless int elements attribute of shape [" #
@@ -1514,8 +1512,8 @@ class RankedI64ElementsAttr<list<int> dims> :
     RankedSignlessIntElementsAttr<64, dims>;
 
 class FloatElementsAttr<int width> : ElementsAttrBase<
-  CPred<"$_self.isa<::mlir::DenseFPElementsAttr>() &&"
-      "$_self.cast<::mlir::DenseElementsAttr>().getType()."
+  CPred<"::llvm::isa<::mlir::DenseFPElementsAttr>($_self) &&"
+      "::llvm::cast<::mlir::DenseElementsAttr>($_self).getType()."
       "getElementType().isF" # width # "()">,
   width # "-bit float elements attribute"> {
 
@@ -1534,12 +1532,12 @@ def F64ElementsAttr : FloatElementsAttr<64>;
 // A `width`-bit floating point elements attribute. The attribute should be
 // ranked and has a shape as specified in `dims`.
 class RankedFloatElementsAttr<int width, list<int> dims> : ElementsAttrBase<
-  CPred<"$_self.isa<::mlir::DenseFPElementsAttr>() &&"
-      "$_self.cast<::mlir::DenseFPElementsAttr>().getType()."
+  CPred<"::llvm::isa<::mlir::DenseFPElementsAttr>($_self) &&"
+      "::llvm::cast<::mlir::DenseFPElementsAttr>($_self).getType()."
       "getElementType().isF" # width # "() && "
       // Check that this is ranked and has the specified shape.
-      "$_self.cast<::mlir::DenseFPElementsAttr>().getType().hasRank() && "
-      "$_self.cast<::mlir::DenseFPElementsAttr>().getType().getShape() == "
+      "::llvm::cast<::mlir::DenseFPElementsAttr>($_self).getType().hasRank() && "
+      "::llvm::cast<::mlir::DenseFPElementsAttr>($_self).getType().getShape() == "
       "::mlir::ArrayRef<int64_t>({" # !interleave(dims, ", ") # "})">,
   width # "-bit float elements attribute of shape [" #
   !interleave(dims, ", ") # "]"> {
@@ -1547,10 +1545,11 @@ class RankedFloatElementsAttr<int width, list<int> dims> : ElementsAttrBase<
   let storageType = [{ ::mlir::DenseFPElementsAttr }];
   let returnType = [{ ::mlir::DenseFPElementsAttr }];
 
-  let constBuilderCall = "::mlir::DenseElementsAttr::get("
+  let constBuilderCall = "::llvm::cast<::mlir::DenseFPElementsAttr>("
+  "::mlir::DenseElementsAttr::get("
     "::mlir::RankedTensorType::get({" # !interleave(dims, ", ") #
     "}, $_builder.getF" # width # "Type()), "
-    "::llvm::ArrayRef($0)).cast<::mlir::DenseFPElementsAttr>()";
+    "::llvm::ArrayRef($0)))";
   let convertFromStorage = "$_self";
 }
 
@@ -1558,7 +1557,7 @@ class RankedF32ElementsAttr<list<int> dims> : RankedFloatElementsAttr<32, dims>;
 class RankedF64ElementsAttr<list<int> dims> : RankedFloatElementsAttr<64, dims>;
 
 def StringElementsAttr : ElementsAttrBase<
-  CPred<"$_self.isa<::mlir::DenseStringElementsAttr>()" >,
+  CPred<"::llvm::isa<::mlir::DenseStringElementsAttr>($_self)" >,
   "string elements attribute"> {
 
   let storageType = [{ ::mlir::DenseElementsAttr }];
@@ -1569,7 +1568,7 @@ def StringElementsAttr : ElementsAttrBase<
 
 // Attributes containing affine maps.
 def AffineMapAttr : Attr<
-CPred<"$_self.isa<::mlir::AffineMapAttr>()">, "AffineMap attribute"> {
+CPred<"::llvm::isa<::mlir::AffineMapAttr>($_self)">, "AffineMap attribute"> {
   let storageType = [{::mlir::AffineMapAttr }];
   let returnType = [{ ::mlir::AffineMap }];
   let valueType = Index;
@@ -1585,7 +1584,7 @@ class ArrayAttrBase<Pred condition, string summary> : Attr<condition, summary> {
   let constBuilderCall = "$_builder.getArrayAttr($0)";
 }
 
-def ArrayAttr : ArrayAttrBase<CPred<"$_self.isa<::mlir::ArrayAttr>()">,
+def ArrayAttr : ArrayAttrBase<CPred<"::llvm::isa<::mlir::ArrayAttr>($_self)">,
                               "array attribute">;
 
 // Base class for array attributes whose elements are of the same kind.
@@ -1593,9 +1592,9 @@ def ArrayAttr : ArrayAttrBase<CPred<"$_self.isa<::mlir::ArrayAttr>()">,
 class TypedArrayAttrBase<Attr element, string summary>: ArrayAttrBase<
     And<[
       // Guarantee this is an ArrayAttr first
-      CPred<"$_self.isa<::mlir::ArrayAttr>()">,
+      CPred<"::llvm::isa<::mlir::ArrayAttr>($_self)">,
       // Guarantee all elements satisfy the constraints from `element`
-      Concat<"::llvm::all_of($_self.cast<::mlir::ArrayAttr>(), "
+      Concat<"::llvm::all_of(::llvm::cast<::mlir::ArrayAttr>($_self), "
                             "[&](::mlir::Attribute attr) { return attr && (",
                                SubstLeaves<"$_self", "attr", element.predicate>,
                             "); })">]>,
@@ -1653,7 +1652,7 @@ def DictArrayAttr :
   TypedArrayAttrBase<DictionaryAttr, "Array of dictionary attributes">;
 
 // Attributes containing symbol references.
-def SymbolRefAttr : Attr<CPred<"$_self.isa<::mlir::SymbolRefAttr>()">,
+def SymbolRefAttr : Attr<CPred<"::llvm::isa<::mlir::SymbolRefAttr>($_self)">,
                         "symbol reference attribute"> {
   let storageType = [{ ::mlir::SymbolRefAttr }];
   let returnType = [{ ::mlir::SymbolRefAttr }];
@@ -1663,7 +1662,7 @@ def SymbolRefAttr : Attr<CPred<"$_self.isa<::mlir::SymbolRefAttr>()">,
   let convertFromStorage = "$_self";
 }
 
-def FlatSymbolRefAttr : Attr<CPred<"$_self.isa<::mlir::FlatSymbolRefAttr>()">,
+def FlatSymbolRefAttr : Attr<CPred<"::llvm::isa<::mlir::FlatSymbolRefAttr>($_self)">,
                                    "flat symbol reference attribute"> {
   let storageType = [{ ::mlir::FlatSymbolRefAttr }];
   let returnType = [{ ::llvm::StringRef }];
@@ -1773,89 +1772,89 @@ class AllAttrOf<list<AttrConstraint> constraints> : AttrConstraint<
 }
 
 class IntMinValue<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::IntegerAttr>().getInt() >= " # n>,
+    CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getInt() >= " # n>,
     "whose minimum value is " # n>;
 
 class IntMaxValue<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::IntegerAttr>().getInt() <= " # n>,
+    CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getInt() <= " # n>,
     "whose maximum value is " # n>;
 
 def IntNonNegative : AttrConstraint<
-    CPred<"!$_self.cast<::mlir::IntegerAttr>().getValue().isNegative()">,
+    CPred<"!::llvm::cast<::mlir::IntegerAttr>($_self).getValue().isNegative()">,
     "whose value is non-negative">;
 
 def IntPositive : AttrConstraint<
-    CPred<"$_self.cast<::mlir::IntegerAttr>().getValue().isStrictlyPositive()">,
+    CPred<"::llvm::cast<::mlir::IntegerAttr>($_self).getValue().isStrictlyPositive()">,
     "whose value is positive">;
 
 class ArrayMinCount<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::ArrayAttr>().size() >= " # n>,
+    CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() >= " # n>,
     "with at least " # n # " elements">;
 
 class ArrayCount<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::ArrayAttr>().size() == " #n>,
+    CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() == " #n>,
     "with exactly " # n # " elements">;
 
 class DenseArrayCount<int n> : AttrConstraint<
-    CPred<"$_self.cast<::mlir::DenseArrayAttr>().size() == " #n>,
+    CPred<"::llvm::cast<::mlir::DenseArrayAttr>($_self).size() == " #n>,
     "with exactly " # n # " elements">;
 
 class DenseArrayStrictlyPositive<DenseArrayAttrBase arrayType> : AttrConstraint<
-  CPred<"::llvm::all_of($_self.cast<" # arrayType #">().asArrayRef(), "
+  CPred<"::llvm::all_of(::llvm::cast<" # arrayType #">($_self).asArrayRef(), "
                         "[&](auto v) { return v > 0; })">,
   "whose value is positive">;
 
 class DenseArrayNonNegative<DenseArrayAttrBase arrayType> : AttrConstraint<
-  CPred<"::llvm::all_of($_self.cast<" # arrayType #">().asArrayRef(), "
+  CPred<"::llvm::all_of(::llvm::cast<" # arrayType #">($_self).asArrayRef(), "
                         "[&](auto v) { return v >= 0; })">,
   "whose value is non-negative">;
 
 class DenseArraySorted<DenseArrayAttrBase arrayType> : AttrConstraint<
-    CPred<"llvm::is_sorted($_self.cast<" # arrayType # ">().asArrayRef())">,
+    CPred<"llvm::is_sorted(::llvm::cast<" # arrayType # ">($_self).asArrayRef())">,
     "should be in non-decreasing order">;
 
 class DenseArrayStrictlySorted<DenseArrayAttrBase arrayType> : AttrConstraint<
     And<[
-      CPred<"llvm::is_sorted($_self.cast<" # arrayType # ">().asArrayRef())">,
+      CPred<"llvm::is_sorted(::llvm::cast<" # arrayType # ">($_self).asArrayRef())">,
       // Check that no two adjacent elements are the same.
       CPred<"[](" # arrayType.returnType # " a) {\n"
         "return std::adjacent_find(std::begin(a), std::end(a)) == "
         "std::end(a);\n"
-        "}($_self.cast<" # arrayType # ">().asArrayRef())"
+        "}(::llvm::cast<" # arrayType # ">($_self).asArrayRef())"
       >]>,
     "should be in increasing order">;
 
 class IntArrayNthElemEq<int index, int value> : AttrConstraint<
     And<[
-      CPred<"$_self.cast<::mlir::ArrayAttr>().size() > " # index>,
-      CPred<"$_self.cast<::mlir::ArrayAttr>()[" # index # "]"
-        ".cast<::mlir::IntegerAttr>().getInt() == " # value>
+      CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() > " # index>,
+      CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>($_self)["
+            # index # "]).getInt() == "  # value>
        ]>,
     "whose " # index # "-th element must be " # value>;
 
 class IntArrayNthElemMinValue<int index, int min> : AttrConstraint<
     And<[
-      CPred<"$_self.cast<::mlir::ArrayAttr>().size() > " # index>,
-      CPred<"$_self.cast<::mlir::ArrayAttr>()[" # index # "]"
-        ".cast<::mlir::IntegerAttr>().getInt() >= " # min>
+      CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() > " # index>,
+      CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>($_self)["
+            # index # "]).getInt() >= " # min>
         ]>,
     "whose " # index # "-th element must be at least " # min>;
 
 class IntArrayNthElemMaxValue<int index, int max> : AttrConstraint<
     And<[
-      CPred<"$_self.cast<::mlir::ArrayAttr>().size() > " # index>,
-      CPred<"$_self.cast<::mlir::ArrayAttr>()[" # index # "]"
-        ".cast<::mlir::IntegerAttr>().getInt() <= " # max>
+      CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() > " # index>,
+      CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>($_self)["
+            # index # "]).getInt() <= " # max>
         ]>,
     "whose " # index # "-th element must be at most " # max>;
 
 class IntArrayNthElemInRange<int index, int min, int max> : AttrConstraint<
     And<[
-      CPred<"$_self.cast<::mlir::ArrayAttr>().size() > " # index>,
-      CPred<"$_self.cast<::mlir::ArrayAttr>()[" # index # "]"
-        ".cast<::mlir::IntegerAttr>().getInt() >= " # min>,
-      CPred<"$_self.cast<::mlir::ArrayAttr>()[" # index # "]"
-        ".cast<::mlir::IntegerAttr>().getInt() <= " # max>
+      CPred<"::llvm::cast<::mlir::ArrayAttr>($_self).size() > " # index>,
+      CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>($_self)["
+            # index # "]).getInt() >= " # min>,
+      CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>($_self)["
+            # index # "]).getInt() <= " # max>
         ]>,
     "whose " # index # "-th element must be at least " # min # " and at most " # max>;
 
@@ -2217,8 +2216,8 @@ class Interface<string name, list<Interface> baseInterfacesArg = []> {
 // AttrInterface represents an interface registered to an attribute.
 class AttrInterface<string name, list<Interface> baseInterfaces = []>
   : Interface<name, baseInterfaces>, InterfaceTrait<name>,
-	  Attr<CPred<"$_self.isa<"
-		  # !if(!empty(cppNamespace),"", cppNamespace # "::") # name # ">()">,
+	  Attr<CPred<"::llvm::isa<"
+		  # !if(!empty(cppNamespace),"", cppNamespace # "::") # name # ">($_self)">,
 			name # " instance"
     > {
 	let storageType = !if(!empty(cppNamespace), "", cppNamespace # "::") # name;
@@ -2233,8 +2232,8 @@ class OpInterface<string name, list<Interface> baseInterfaces = []>
 // TypeInterface represents an interface registered to a type.
 class TypeInterface<string name, list<Interface> baseInterfaces = []>
   : Interface<name, baseInterfaces>, InterfaceTrait<name>,
-	  Type<CPred<"$_self.isa<"
-		  # !if(!empty(cppNamespace),"", cppNamespace # "::") # name # ">()">,
+	  Type<CPred<"::llvm::isa<"
+		  # !if(!empty(cppNamespace),"", cppNamespace # "::") # name # ">($_self)">,
 			name # " instance",
 			!if(!empty(cppNamespace),"", cppNamespace # "::") # name
     >;
@@ -2505,13 +2504,13 @@ class Results<dag rets> {
 // TODO: Improve the autogenerated error messages.
 
 class Rank<string name> :
-    StrFunc<"$" # name # ".getType().cast<::mlir::ShapedType>().getRank()">;
+    StrFunc<"::llvm::cast<::mlir::ShapedType>($" # name # ".getType()).getRank()">;
 
 class Shape<string name> :
-    StrFunc<"$" # name # ".getType().cast<::mlir::ShapedType>().getShape()">;
+    StrFunc<"::llvm::cast<::mlir::ShapedType>($" # name # ".getType()).getShape()">;
 
 class ElementCount<string name> :
-  StrFunc<"$" # name # ".getType().cast<::mlir::ShapedType>()"
+  StrFunc<"llvm::cast<::mlir::ShapedType>($" # name # ".getType())"
                                  ".getNumElements()">;
 
 class ElementType<string name> : StrFunc<"getElementTypeOrSelf($" # name # ")">;

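The OpBase.td changes above only rewrite the C++ snippets stored inside each CPred; mlir-tblgen still substitutes $_self with the value being verified when it emits the checks (see the op-attribute.td and predicate.td test updates further down). A rough sketch of the resulting shape, simplified and with illustrative names, not verbatim tblgen output:

    #include "mlir/IR/BuiltinAttributes.h"
    #include "llvm/Support/Casting.h"

    // From: CPred<"::llvm::isa<::mlir::IntegerAttr>($_self)"> with $_self
    // replaced by the attribute under test.
    static bool intAttrConstraintSketch(mlir::Attribute tblgen_attr) {
      if (tblgen_attr && !(::llvm::isa<::mlir::IntegerAttr>(tblgen_attr)))
        return false; // the generated verifier would report a failed constraint here
      return true;
    }
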
diff  --git a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
index a443b49a2f2a1..ab12f074203d3 100644
--- a/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
+++ b/mlir/include/mlir/Interfaces/DestinationStyleOpInterface.td
@@ -201,7 +201,7 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
         assert(opOperand->getOwner() == $_op.getOperation());
-        return !opOperand->get().getType().template isa<ShapedType>();
+        return !::llvm::isa<ShapedType>(opOperand->get().getType());
       }]
     >,
     InterfaceMethod<
@@ -245,7 +245,7 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
           ::llvm::all_of($_op->getOpOperands(),
             [&](::mlir::OpOperand &opOperand) {
               return isScalar(&opOperand) ||
-                     opOperand.get().getType().template isa<::mlir::MemRefType>();
+                     ::llvm::isa<::mlir::MemRefType>(opOperand.get().getType());
             });
       }]
     >,
@@ -259,7 +259,7 @@ def DestinationStyleOpInterface : OpInterface<"DestinationStyleOpInterface"> {
         return ::llvm::all_of($_op->getOpOperands(),
           [&](::mlir::OpOperand &opOperand) {
             return isScalar(&opOperand) ||
-                   opOperand.get().getType().template isa<::mlir::RankedTensorType>();
+                   ::llvm::isa<::mlir::RankedTensorType>(opOperand.get().getType());
           });
       }]
     >

diff  --git a/mlir/include/mlir/Interfaces/VectorInterfaces.td b/mlir/include/mlir/Interfaces/VectorInterfaces.td
index ce2e09612fb1e..8e56d926fc3eb 100644
--- a/mlir/include/mlir/Interfaces/VectorInterfaces.td
+++ b/mlir/include/mlir/Interfaces/VectorInterfaces.td
@@ -34,8 +34,7 @@ def VectorUnrollOpInterface : OpInterface<"VectorUnrollOpInterface"> {
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
         assert($_op->getNumResults() == 1);
-        auto vt = $_op.getResult().getType().
-          template dyn_cast<::mlir::VectorType>();
+        auto vt = ::llvm::dyn_cast<::mlir::VectorType>($_op.getResult().getType());
         if (!vt)
           return ::std::nullopt;
         ::llvm::SmallVector<int64_t, 4> res(vt.getShape().begin(), vt.getShape().end());
@@ -78,8 +77,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
       /*defaultImplementation=*/[{
         return $_op.isBroadcastDim(dim)
             || ($_op.getInBounds()
-                && $_op.getInBounds()->template cast<::mlir::ArrayAttr>()[dim]
-                                    .template cast<::mlir::BoolAttr>().getValue());
+                && cast<::mlir::BoolAttr>(cast<::mlir::ArrayAttr>(*$_op.getInBounds())[dim]).getValue());
       }]
     >,
     InterfaceMethod<
@@ -156,7 +154,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
       /*args=*/(ins),
       /*methodBody=*/"",
       /*defaultImplementation=*/
-        "return $_op.getSource().getType().template cast<::mlir::ShapedType>();"
+        "return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());"
     >,
     InterfaceMethod<
       /*desc=*/"Return the VectorType.",
@@ -165,7 +163,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
       /*args=*/(ins),
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
-        return $_op.getVector().getType().template dyn_cast<::mlir::VectorType>();
+        return ::llvm::dyn_cast<::mlir::VectorType>($_op.getVector().getType());
         }]
     >,
     InterfaceMethod<

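The VectorInterfaces.td default implementations above lean on dyn_cast returning a null VectorType for non-vector types; a standalone sketch of that pattern (hypothetical helper, simplified signature):

    #include <optional>

    #include "mlir/IR/BuiltinTypes.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/Casting.h"

    static std::optional<llvm::SmallVector<int64_t, 4>>
    vectorShapeSketch(mlir::Type resultType) {
      auto vt = ::llvm::dyn_cast<mlir::VectorType>(resultType);
      if (!vt) // not a vector: dyn_cast produced a null type
        return std::nullopt;
      return llvm::SmallVector<int64_t, 4>(vt.getShape().begin(),
                                           vt.getShape().end());
    }
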
diff  --git a/mlir/lib/Dialect/Shape/IR/ShapeCanonicalization.td b/mlir/lib/Dialect/Shape/IR/ShapeCanonicalization.td
index 618d5b08b4b7a..cb294ae2978fc 100644
--- a/mlir/lib/Dialect/Shape/IR/ShapeCanonicalization.td
+++ b/mlir/lib/Dialect/Shape/IR/ShapeCanonicalization.td
@@ -11,7 +11,7 @@ def HasSingleElement : Constraint<CPred< [{
 }]>>;
 
 def HasStaticShape : Constraint<CPred< [{
-  $0.getType().dyn_cast<ShapedType>().hasStaticShape()
+  ::llvm::dyn_cast<ShapedType>($0.getType()).hasStaticShape()
 }]>>;
 
 // Helper that takes the first element of a range.

diff  --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td
index 507f4aaaef649..a108f278c1ffa 100644
--- a/mlir/test/lib/Dialect/Test/TestOps.td
+++ b/mlir/test/lib/Dialect/Test/TestOps.td
@@ -99,7 +99,7 @@ def MultiTensorRankOf : TEST_Op<"multi_tensor_rank_of"> {
 }
 
 def TEST_TestType : DialectType<Test_Dialect,
-    CPred<"$_self.isa<::test::TestType>()">, "test">,
+    CPred<"::llvm::isa<::test::TestType>($_self)">, "test">,
     BuildableType<"$_builder.getType<::test::TestType>()">;
 
 //===----------------------------------------------------------------------===//
@@ -260,7 +260,7 @@ def DerivedTypeAttrOp : TEST_Op<"derived_type_attr", []> {
   DerivedTypeAttr element_dtype =
     DerivedTypeAttr<"return getElementTypeOrSelf(getOutput().getType());">;
   DerivedAttr num_elements = DerivedAttr<"int",
-    "return getOutput().getType().cast<ShapedType>().getNumElements();",
+    "return ::llvm::cast<ShapedType>(getOutput().getType()).getNumElements();",
     "$_builder.getI32IntegerAttr($_self)">;
 }
 
@@ -578,7 +578,7 @@ def FunctionalRegionOp : TEST_Op<"functional_region_op",
   let extraClassDeclaration = [{
     ::mlir::Region *getCallableRegion() { return &getBody(); }
     ::llvm::ArrayRef<::mlir::Type> getCallableResults() {
-      return getType().cast<::mlir::FunctionType>().getResults();
+      return ::llvm::cast<::mlir::FunctionType>(getType()).getResults();
     }
     ::mlir::ArrayAttr getCallableArgAttrs() {
       return nullptr;
@@ -1262,11 +1262,10 @@ def OpAllAttrConstraint2 : TEST_Op<"all_attr_constraint_of2"> {
   let results = (outs I32);
 }
 def Constraint0 : AttrConstraint<
-    CPred<"$_self.cast<ArrayAttr>()[0]."
-          "cast<::mlir::IntegerAttr>().getInt() == 0">,
+    CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<ArrayAttr>($_self)[0]).getInt() == 0">,
     "[0] == 0">;
 def Constraint1 : AttrConstraint<
-    CPred<"$_self.cast<ArrayAttr>()[1].cast<::mlir::IntegerAttr>().getInt() == 1">,
+    CPred<"::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<ArrayAttr>($_self)[1]).getInt() == 1">,
     "[1] == 1">;
 def : Pat<(OpAllAttrConstraint1
             AllAttrOf<[Constraint0, Constraint1]>:$attr),
@@ -1483,7 +1482,7 @@ def OpAttrMatch2 : TEST_Op<"match_op_attribute2"> {
   let results = (outs I32);
 }
 def MoreConstraint : AttrConstraint<
-    CPred<"$_self.cast<IntegerAttr>().getInt() == 4">, "more constraint">;
+    CPred<"::llvm::cast<IntegerAttr>($_self).getInt() == 4">, "more constraint">;
 def : Pat<(OpAttrMatch1 $required, $optional, $default_valued,
                         MoreConstraint:$more),
           (OpAttrMatch2 $required, $optional, $default_valued, $more)>;

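The TestOps.td constraints above use the nested-cast idiom for indexing into an ArrayAttr and then inspecting one element, the same shape as the IntArrayNthElem* classes in OpBase.td. A minimal sketch, with a hypothetical helper name:

    #include "mlir/IR/BuiltinAttributes.h"
    #include "llvm/Support/Casting.h"

    static bool nthElementEqualsSketch(mlir::Attribute attr, unsigned index,
                                       int64_t expected) {
      auto array = ::llvm::cast<mlir::ArrayAttr>(attr); // asserts if not an ArrayAttr
      if (array.size() <= index)
        return false;
      return ::llvm::cast<mlir::IntegerAttr>(array[index]).getInt() == expected;
    }
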
diff  --git a/mlir/test/mlir-pdll/Parser/include_td.pdll b/mlir/test/mlir-pdll/Parser/include_td.pdll
index 5526aa852482f..f8d78dd339f23 100644
--- a/mlir/test/mlir-pdll/Parser/include_td.pdll
+++ b/mlir/test/mlir-pdll/Parser/include_td.pdll
@@ -21,7 +21,7 @@
 // CHECK-NEXT:   Operands { operands : Variadic<I64> }
 // CHECK-NEXT:   Results { results : Variadic<I64> }
 // CHECK-NEXT: }
-  
+
 // CHECK:      AttributeConstraint `I64Attr` {
 // CHECK-NEXT:   Summary: 64-bit signless integer attribute
 // CHECK-NEXT:   CppClass: ::mlir::IntegerAttr
@@ -32,13 +32,13 @@
 // CHECK-NEXT:   CppClass: ::mlir::IntegerType
 // CHECK-NEXT: }
 
-// CHECK: UserConstraintDecl {{.*}} Name<TestAttrInterface> ResultType<Tuple<>> Code<return ::mlir::success((self.isa<TestAttrInterface>()));>
+// CHECK: UserConstraintDecl {{.*}} Name<TestAttrInterface> ResultType<Tuple<>> Code<return ::mlir::success((::llvm::isa<TestAttrInterface>(self)));>
 // CHECK:  `Inputs`
 // CHECK:    `-VariableDecl {{.*}} Name<self> Type<Attr>
 // CHECK:      `Constraints`
 // CHECK:        `-AttrConstraintDecl
 
-// CHECK: UserConstraintDecl {{.*}} Name<TestTypeInterface> ResultType<Tuple<>> Code<return ::mlir::success((self.isa<TestTypeInterface>()));>
+// CHECK: UserConstraintDecl {{.*}} Name<TestTypeInterface> ResultType<Tuple<>> Code<return ::mlir::success((::llvm::isa<TestTypeInterface>(self)));>
 // CHECK:  `Inputs`
 // CHECK:    `-VariableDecl {{.*}} Name<self> Type<Type>
 // CHECK:      `Constraints`

diff  --git a/mlir/test/mlir-tblgen/interfaces-as-constraints.td b/mlir/test/mlir-tblgen/interfaces-as-constraints.td
index 6468e60c70d29..efce1c3927044 100644
--- a/mlir/test/mlir-tblgen/interfaces-as-constraints.td
+++ b/mlir/test/mlir-tblgen/interfaces-as-constraints.td
@@ -24,22 +24,22 @@ def OpUsingAllOfThose : Op<Test_Dialect, "OpUsingAllOfThose"> {
 }
 
 // CHECK: static ::mlir::LogicalResult {{__mlir_ods_local_type_constraint.*}}(
-// CHECK:   if (!((type.isa<TopLevelTypeInterface>()))) {
+// CHECK:   if (!((::llvm::isa<TopLevelTypeInterface>(type)))) {
 // CHECK-NEXT:    return op->emitOpError(valueKind) << " #" << valueIndex
 // CHECK-NEXT:        << " must be TopLevelTypeInterface instance, but got " << type;
 
 // CHECK: static ::mlir::LogicalResult {{__mlir_ods_local_type_constraint.*}}(
-// CHECK:   if (!((type.isa<test::TypeInterfaceInNamespace>()))) {
+// CHECK:   if (!((::llvm::isa<test::TypeInterfaceInNamespace>(type)))) {
 // CHECK-NEXT:    return op->emitOpError(valueKind) << " #" << valueIndex
 // CHECK-NEXT:        << " must be TypeInterfaceInNamespace instance, but got " << type;
 
 // CHECK: static ::mlir::LogicalResult {{__mlir_ods_local_attr_constraint.*}}(
-// CHECK:   if (attr && !((attr.isa<TopLevelAttrInterface>())))
+// CHECK:   if (attr && !((::llvm::isa<TopLevelAttrInterface>(attr))))
 // CHECK-NEXT:     return getDiag() << "attribute '" << attrName
 // CHECK-NEXT:        << "' failed to satisfy constraint: TopLevelAttrInterface instance";
 
 // CHECK: static ::mlir::LogicalResult {{__mlir_ods_local_attr_constraint.*}}(
-// CHECK:   if (attr && !((attr.isa<test::AttrInterfaceInNamespace>())))
+// CHECK:   if (attr && !((::llvm::isa<test::AttrInterfaceInNamespace>(attr))))
 // CHECK-NEXT:    return getDiag() << "attribute '" << attrName
 // CHECK-NEXT:        << "' failed to satisfy constraint: AttrInterfaceInNamespace instance";
 

diff  --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td
index a10cb0536ebc8..d39dc3e4841e9 100644
--- a/mlir/test/mlir-tblgen/op-attribute.td
+++ b/mlir/test/mlir-tblgen/op-attribute.td
@@ -310,18 +310,18 @@ def BOp : NS_Op<"b_op", []> {
 
 // DEF-LABEL: BOpAdaptor::verify
 // DEF: if (tblgen_any_attr && !((true)))
-// DEF: if (tblgen_bool_attr && !((tblgen_bool_attr.isa<::mlir::BoolAttr>())))
-// DEF: if (tblgen_i32_attr && !(((tblgen_i32_attr.isa<::mlir::IntegerAttr>())) && ((tblgen_i32_attr.cast<::mlir::IntegerAttr>().getType().isSignlessInteger(32)))))
-// DEF: if (tblgen_i64_attr && !(((tblgen_i64_attr.isa<::mlir::IntegerAttr>())) && ((tblgen_i64_attr.cast<::mlir::IntegerAttr>().getType().isSignlessInteger(64)))))
-// DEF: if (tblgen_f32_attr && !(((tblgen_f32_attr.isa<::mlir::FloatAttr>())) && ((tblgen_f32_attr.cast<::mlir::FloatAttr>().getType().isF32()))))
-// DEF: if (tblgen_f64_attr && !(((tblgen_f64_attr.isa<::mlir::FloatAttr>())) && ((tblgen_f64_attr.cast<::mlir::FloatAttr>().getType().isF64()))))
-// DEF: if (tblgen_str_attr && !((tblgen_str_attr.isa<::mlir::StringAttr>())))
-// DEF: if (tblgen_elements_attr && !((tblgen_elements_attr.isa<::mlir::ElementsAttr>())))
-// DEF: if (tblgen_function_attr && !((tblgen_function_attr.isa<::mlir::FlatSymbolRefAttr>())))
-// DEF: if (tblgen_some_type_attr && !(((tblgen_some_type_attr.isa<::mlir::TypeAttr>())) && ((tblgen_some_type_attr.cast<::mlir::TypeAttr>().getValue().isa<SomeType>())) && ((true))))
-// DEF: if (tblgen_array_attr && !((tblgen_array_attr.isa<::mlir::ArrayAttr>())))
-// DEF: if (tblgen_some_attr_array && !(((tblgen_some_attr_array.isa<::mlir::ArrayAttr>())) && (::llvm::all_of(tblgen_some_attr_array.cast<::mlir::ArrayAttr>(), [&](::mlir::Attribute attr) { return attr && ((some-condition)); }))))
-// DEF: if (tblgen_type_attr && !(((tblgen_type_attr.isa<::mlir::TypeAttr>())) && ((tblgen_type_attr.cast<::mlir::TypeAttr>().getValue().isa<::mlir::Type>())) && ((true))))
+// DEF: if (tblgen_bool_attr && !((::llvm::isa<::mlir::BoolAttr>(tblgen_bool_attr))))
+// DEF: if (tblgen_i32_attr && !(((::llvm::isa<::mlir::IntegerAttr>(tblgen_i32_attr))) && ((::llvm::cast<::mlir::IntegerAttr>(tblgen_i32_attr).getType().isSignlessInteger(32)))))
+// DEF: if (tblgen_i64_attr && !(((::llvm::isa<::mlir::IntegerAttr>(tblgen_i64_attr))) && ((::llvm::cast<::mlir::IntegerAttr>(tblgen_i64_attr).getType().isSignlessInteger(64)))))
+// DEF: if (tblgen_f32_attr && !(((::llvm::isa<::mlir::FloatAttr>(tblgen_f32_attr))) && ((::llvm::cast<::mlir::FloatAttr>(tblgen_f32_attr).getType().isF32()))))
+// DEF: if (tblgen_f64_attr && !(((::llvm::isa<::mlir::FloatAttr>(tblgen_f64_attr))) && ((::llvm::cast<::mlir::FloatAttr>(tblgen_f64_attr).getType().isF64()))))
+// DEF: if (tblgen_str_attr && !((::llvm::isa<::mlir::StringAttr>(tblgen_str_attr))))
+// DEF: if (tblgen_elements_attr && !((::llvm::isa<::mlir::ElementsAttr>(tblgen_elements_attr))))
+// DEF: if (tblgen_function_attr && !((::llvm::isa<::mlir::FlatSymbolRefAttr>(tblgen_function_attr))))
+// DEF: if (tblgen_some_type_attr && !(((::llvm::isa<::mlir::TypeAttr>(tblgen_some_type_attr))) && ((::llvm::isa<SomeType>(::llvm::cast<::mlir::TypeAttr>(tblgen_some_type_attr).getValue()))) && ((true))))
+// DEF: if (tblgen_array_attr && !((::llvm::isa<::mlir::ArrayAttr>(tblgen_array_attr))))
+// DEF: if (tblgen_some_attr_array && !(((::llvm::isa<::mlir::ArrayAttr>(tblgen_some_attr_array))) && (::llvm::all_of(::llvm::cast<::mlir::ArrayAttr>(tblgen_some_attr_array), [&](::mlir::Attribute attr) { return attr && ((some-condition)); }))))
+// DEF: if (tblgen_type_attr && !(((::llvm::isa<::mlir::TypeAttr>(tblgen_type_attr))) && ((::llvm::isa<::mlir::Type>(::llvm::cast<::mlir::TypeAttr>(tblgen_type_attr).getValue()))) && ((true))))
 
 // Test common attribute kind getters' return types
 // ---

diff  --git a/mlir/test/mlir-tblgen/predicate.td b/mlir/test/mlir-tblgen/predicate.td
index e9ab67b035f45..44f395d5dc60c 100644
--- a/mlir/test/mlir-tblgen/predicate.td
+++ b/mlir/test/mlir-tblgen/predicate.td
@@ -27,12 +27,12 @@ def OpA : NS_Op<"op_for_CPred_containing_multiple_same_placeholder", []> {
 // CHECK-NOT.        << " must be 32-bit integer or floating-point type, but got " << type;
 
 // CHECK: static ::mlir::LogicalResult [[$TENSOR_CONSTRAINT:__mlir_ods_local_type_constraint.*]](
-// CHECK:       if (!(((type.isa<::mlir::TensorType>())) && ([](::mlir::Type elementType) { return (true); }(type.cast<::mlir::ShapedType>().getElementType())))) {
+// CHECK:       if (!(((::llvm::isa<::mlir::TensorType>(type))) && ([](::mlir::Type elementType) { return (true); }(::llvm::cast<::mlir::ShapedType>(type).getElementType())))) {
 // CHECK-NEXT:    return op->emitOpError(valueKind) << " #" << valueIndex
 // CHECK-NEXT:        << " must be tensor of any type values, but got " << type;
 
 // CHECK: static ::mlir::LogicalResult [[$TENSOR_INTEGER_FLOAT_CONSTRAINT:__mlir_ods_local_type_constraint.*]](
-// CHECK:       if (!(((type.isa<::mlir::TensorType>())) && ([](::mlir::Type elementType) { return ((elementType.isF32())) || ((elementType.isSignlessInteger(32))); }(type.cast<::mlir::ShapedType>().getElementType())))) {
+// CHECK:       if (!(((::llvm::isa<::mlir::TensorType>(type))) && ([](::mlir::Type elementType) { return ((elementType.isF32())) || ((elementType.isSignlessInteger(32))); }(::llvm::cast<::mlir::ShapedType>(type).getElementType())))) {
 // CHECK-NEXT:    return op->emitOpError(valueKind) << " #" << valueIndex
 // CHECK-NEXT:        << " must be tensor of 32-bit float or 32-bit signless integer values, but got " << type;
 
@@ -54,7 +54,7 @@ def OpF : NS_Op<"op_for_int_min_val", []> {
 }
 
 // CHECK-LABEL: OpFAdaptor::verify
-// CHECK:       (tblgen_attr.cast<::mlir::IntegerAttr>().getInt() >= 10)
+// CHECK:       (::llvm::cast<::mlir::IntegerAttr>(tblgen_attr).getInt() >= 10)
 // CHECK-NEXT:  "attribute 'attr' failed to satisfy constraint: 32-bit signless integer attribute whose minimum value is 10"
 
 def OpFX : NS_Op<"op_for_int_max_val", []> {
@@ -62,7 +62,7 @@ def OpFX : NS_Op<"op_for_int_max_val", []> {
 }
 
 // CHECK-LABEL: OpFXAdaptor::verify
-// CHECK:       (tblgen_attr.cast<::mlir::IntegerAttr>().getInt() <= 10)
+// CHECK:       (::llvm::cast<::mlir::IntegerAttr>(tblgen_attr).getInt() <= 10)
 // CHECK-NEXT:  "attribute 'attr' failed to satisfy constraint: 32-bit signless integer attribute whose maximum value is 10"
 
 def OpG : NS_Op<"op_for_arr_min_count", []> {
@@ -70,7 +70,7 @@ def OpG : NS_Op<"op_for_arr_min_count", []> {
 }
 
 // CHECK-LABEL: OpGAdaptor::verify
-// CHECK:       (tblgen_attr.cast<::mlir::ArrayAttr>().size() >= 8)
+// CHECK:       (::llvm::cast<::mlir::ArrayAttr>(tblgen_attr).size() >= 8)
 // CHECK-NEXT:  "attribute 'attr' failed to satisfy constraint: array attribute with at least 8 elements"
 
 def OpH : NS_Op<"op_for_arr_value_at_index", []> {
@@ -78,7 +78,7 @@ def OpH : NS_Op<"op_for_arr_value_at_index", []> {
 }
 
 // CHECK-LABEL: OpHAdaptor::verify
-// CHECK: (((tblgen_attr.cast<::mlir::ArrayAttr>().size() > 0)) && ((tblgen_attr.cast<::mlir::ArrayAttr>()[0].cast<::mlir::IntegerAttr>().getInt() == 8)))))
+// CHECK: (((::llvm::cast<::mlir::ArrayAttr>(tblgen_attr).size() > 0)) && ((::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>(tblgen_attr)[0]).getInt() == 8)))))
 // CHECK-NEXT:  "attribute 'attr' failed to satisfy constraint: array attribute whose 0-th element must be 8"
 
 def OpI: NS_Op<"op_for_arr_min_value_at_index", []> {
@@ -86,7 +86,7 @@ def OpI: NS_Op<"op_for_arr_min_value_at_index", []> {
 }
 
 // CHECK-LABEL: OpIAdaptor::verify
-// CHECK: (((tblgen_attr.cast<::mlir::ArrayAttr>().size() > 0)) && ((tblgen_attr.cast<::mlir::ArrayAttr>()[0].cast<::mlir::IntegerAttr>().getInt() >= 8)))))
+// CHECK: (((::llvm::cast<::mlir::ArrayAttr>(tblgen_attr).size() > 0)) && ((::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>(tblgen_attr)[0]).getInt() >= 8)))))
 // CHECK-NEXT: "attribute 'attr' failed to satisfy constraint: array attribute whose 0-th element must be at least 8"
 
 def OpJ: NS_Op<"op_for_arr_max_value_at_index", []> {
@@ -94,7 +94,7 @@ def OpJ: NS_Op<"op_for_arr_max_value_at_index", []> {
 }
 
 // CHECK-LABEL: OpJAdaptor::verify
-// CHECK: (((tblgen_attr.cast<::mlir::ArrayAttr>().size() > 0)) && ((tblgen_attr.cast<::mlir::ArrayAttr>()[0].cast<::mlir::IntegerAttr>().getInt() <= 8)))))
+// CHECK: (((::llvm::cast<::mlir::ArrayAttr>(tblgen_attr).size() > 0)) && ((::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>(tblgen_attr)[0]).getInt() <= 8)))))
 // CHECK-NEXT: "attribute 'attr' failed to satisfy constraint: array attribute whose 0-th element must be at most 8"
 
 def OpK: NS_Op<"op_for_arr_in_range_at_index", []> {
@@ -102,7 +102,7 @@ def OpK: NS_Op<"op_for_arr_in_range_at_index", []> {
 }
 
 // CHECK-LABEL: OpKAdaptor::verify
-// CHECK: (((tblgen_attr.cast<::mlir::ArrayAttr>().size() > 0)) && ((tblgen_attr.cast<::mlir::ArrayAttr>()[0].cast<::mlir::IntegerAttr>().getInt() >= 4)) && ((tblgen_attr.cast<::mlir::ArrayAttr>()[0].cast<::mlir::IntegerAttr>().getInt() <= 8)))))
+// CHECK: (((::llvm::cast<::mlir::ArrayAttr>(tblgen_attr).size() > 0)) && ((::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>(tblgen_attr)[0]).getInt() >= 4)) && ((::llvm::cast<::mlir::IntegerAttr>(::llvm::cast<::mlir::ArrayAttr>(tblgen_attr)[0]).getInt() <= 8)))))
 // CHECK-NEXT: "attribute 'attr' failed to satisfy constraint: array attribute whose 0-th element must be at least 4 and at most 8"
 
 def OpL: NS_Op<"op_for_TCopVTEtAreSameAt", [
@@ -134,7 +134,7 @@ def OpM : NS_Op<"op_for_AnyTensorOf", []> {
 
 def OpN : NS_Op<"op_for_StringEscaping", []> {
   let arguments = (ins
-    StringBasedAttr<CPred<"$_self.cast<StringAttr>().getValue() == \"foo\"">,
+    StringBasedAttr<CPred<"::llvm::cast<StringAttr>($_self).getValue() == \"foo\"">,
                     "only value \"foo\" is allowed">:$s
   );
 }

