[llvm-branch-commits] [mlir] 93592b7 - [mlir][OpFormatGen] Format enum attribute cases as keywords when possible
River Riddle via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jan 14 11:47:19 PST 2021
Author: River Riddle
Date: 2021-01-14T11:35:49-08:00
New Revision: 93592b726c7587aa86548cc74268346e25a4a7f2
URL: https://github.com/llvm/llvm-project/commit/93592b726c7587aa86548cc74268346e25a4a7f2
DIFF: https://github.com/llvm/llvm-project/commit/93592b726c7587aa86548cc74268346e25a4a7f2.diff
LOG: [mlir][OpFormatGen] Format enum attribute cases as keywords when possible
In the overwhelmingly common case, enum attribute case strings represent valid identifiers in MLIR syntax. This revision updates the format generator to format as a keyword in these cases, removing the need to wrap values in a string. The parser still retains the ability to parse the string form, but the printer will use the keyword form when applicable.
Differential Revision: https://reviews.llvm.org/D94575
Added:
Modified:
mlir/integration_test/Dialect/Linalg/CPU/matmul-vs-matvec.mlir
mlir/test/Analysis/test-dominance.mlir
mlir/test/Analysis/test-liveness.mlir
mlir/test/Conversion/AffineToStandard/lower-affine.mlir
mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir
mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
mlir/test/Dialect/Affine/parallelize.mlir
mlir/test/Dialect/GPU/all-reduce-max.mlir
mlir/test/Dialect/GPU/all-reduce.mlir
mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
mlir/test/Dialect/Linalg/loops.mlir
mlir/test/Dialect/Linalg/sparse_1d.mlir
mlir/test/Dialect/Linalg/sparse_2d.mlir
mlir/test/Dialect/Linalg/sparse_3d.mlir
mlir/test/Dialect/Linalg/tile-and-distribute.mlir
mlir/test/Dialect/Linalg/vectorization.mlir
mlir/test/Dialect/SCF/for-loop-specialization.mlir
mlir/test/Dialect/SCF/ops.mlir
mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
mlir/test/Dialect/SPIRV/IR/availability.mlir
mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
mlir/test/Dialect/SPIRV/IR/group-ops.mlir
mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
mlir/test/Dialect/SPIRV/IR/target-env.mlir
mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
mlir/test/Dialect/Standard/canonicalize.mlir
mlir/test/Dialect/Standard/expand-ops.mlir
mlir/test/Dialect/Standard/expand-tanh.mlir
mlir/test/Dialect/Vector/vector-contract-transforms.mlir
mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
mlir/test/Dialect/Vector/vector-transforms.mlir
mlir/test/EDSC/builder-api-test.cpp
mlir/test/IR/core-ops.mlir
mlir/test/IR/invalid-ops.mlir
mlir/test/IR/invalid.mlir
mlir/test/Target/SPIRV/barrier-ops.mlir
mlir/test/Target/SPIRV/group-ops.mlir
mlir/test/Target/SPIRV/non-uniform-ops.mlir
mlir/test/Transforms/buffer-deallocation.mlir
mlir/test/Transforms/buffer-hoisting.mlir
mlir/test/Transforms/buffer-loop-hoisting.mlir
mlir/test/Transforms/canonicalize-block-merge.mlir
mlir/test/Transforms/canonicalize.mlir
mlir/test/Transforms/constant-fold.mlir
mlir/test/Transforms/copy-removal.mlir
mlir/test/Transforms/cse.mlir
mlir/test/Transforms/parametric-tiling.mlir
mlir/test/Transforms/promote-buffers-to-stack.mlir
mlir/test/Transforms/sccp-callgraph.mlir
mlir/test/Transforms/sccp-structured.mlir
mlir/test/Transforms/sccp.mlir
mlir/test/mlir-tblgen/op-format.mlir
mlir/tools/mlir-tblgen/OpFormatGen.cpp
Removed:
################################################################################
diff --git a/mlir/integration_test/Dialect/Linalg/CPU/matmul-vs-matvec.mlir b/mlir/integration_test/Dialect/Linalg/CPU/matmul-vs-matvec.mlir
index 7aa875a3cf53..27331b616a6e 100644
--- a/mlir/integration_test/Dialect/Linalg/CPU/matmul-vs-matvec.mlir
+++ b/mlir/integration_test/Dialect/Linalg/CPU/matmul-vs-matvec.mlir
@@ -55,7 +55,7 @@ func @main() {
scf.for %j = %c0 to %n step %c1 {
%e1 = load %C1[%i, %j] : memref<?x?xf32>
%e2 = load %C2[%i, %j] : memref<?x?xf32>
- %c = cmpf "oeq", %e1, %e2 : f32
+ %c = cmpf oeq, %e1, %e2 : f32
assert %c, "Matmul does not produce same output as matvec"
}
}
diff --git a/mlir/test/Analysis/test-dominance.mlir b/mlir/test/Analysis/test-dominance.mlir
index 4575f7c27999..0fc30da128c2 100644
--- a/mlir/test/Analysis/test-dominance.mlir
+++ b/mlir/test/Analysis/test-dominance.mlir
@@ -51,7 +51,7 @@ func @func_condBranch(%cond : i1) {
func @func_loop(%arg0 : i32, %arg1 : i32) {
br ^loopHeader(%arg0 : i32)
^loopHeader(%counter : i32):
- %lessThan = cmpi "slt", %counter, %arg1 : i32
+ %lessThan = cmpi slt, %counter, %arg1 : i32
cond_br %lessThan, ^loopBody, ^exit
^loopBody:
%const0 = constant 1 : i32
@@ -155,7 +155,7 @@ func @func_loop_nested_region(
%arg4 : index) {
br ^loopHeader(%arg0 : i32)
^loopHeader(%counter : i32):
- %lessThan = cmpi "slt", %counter, %arg1 : i32
+ %lessThan = cmpi slt, %counter, %arg1 : i32
cond_br %lessThan, ^loopBody, ^exit
^loopBody:
%const0 = constant 1 : i32
diff --git a/mlir/test/Analysis/test-liveness.mlir b/mlir/test/Analysis/test-liveness.mlir
index 3beb2186afb5..11648e4819a3 100644
--- a/mlir/test/Analysis/test-liveness.mlir
+++ b/mlir/test/Analysis/test-liveness.mlir
@@ -84,7 +84,7 @@ func @func_loop(%arg0 : i32, %arg1 : i32) -> i32 {
// CHECK-NEXT: %2 = cmpi
// CHECK-NEXT: cond_br
// CHECK-NEXT: EndLiveness
- %lessThan = cmpi "slt", %counter, %arg1 : i32
+ %lessThan = cmpi slt, %counter, %arg1 : i32
cond_br %lessThan, ^loopBody(%i : i32), ^exit(%i : i32)
^loopBody(%val : i32):
// CHECK: Block: 2
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index 38d269913e51..cac7d17a8348 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -159,7 +159,7 @@ func private @get_idx() -> (index)
// CHECK-NEXT: %[[v1:.*]] = muli %[[v0]], %[[cm1]] : index
// CHECK-NEXT: %[[c20:.*]] = constant 20 : index
// CHECK-NEXT: %[[v2:.*]] = addi %[[v1]], %[[c20]] : index
-// CHECK-NEXT: %[[v3:.*]] = cmpi "sge", %[[v2]], %[[c0]] : index
+// CHECK-NEXT: %[[v3:.*]] = cmpi sge, %[[v2]], %[[c0]] : index
// CHECK-NEXT: if %[[v3]] {
// CHECK-NEXT: call @body(%[[v0:.*]]) : (index) -> ()
// CHECK-NEXT: }
@@ -180,7 +180,7 @@ func @if_only() {
// CHECK-NEXT: %[[v1:.*]] = muli %[[v0]], %[[cm1]] : index
// CHECK-NEXT: %[[c20:.*]] = constant 20 : index
// CHECK-NEXT: %[[v2:.*]] = addi %[[v1]], %[[c20]] : index
-// CHECK-NEXT: %[[v3:.*]] = cmpi "sge", %[[v2]], %[[c0]] : index
+// CHECK-NEXT: %[[v3:.*]] = cmpi sge, %[[v2]], %[[c0]] : index
// CHECK-NEXT: if %[[v3]] {
// CHECK-NEXT: call @body(%[[v0:.*]]) : (index) -> ()
// CHECK-NEXT: } else {
@@ -205,12 +205,12 @@ func @if_else() {
// CHECK-NEXT: %[[v1:.*]] = muli %[[v0]], %[[cm1]] : index
// CHECK-NEXT: %[[c20:.*]] = constant 20 : index
// CHECK-NEXT: %[[v2:.*]] = addi %[[v1]], %[[c20]] : index
-// CHECK-NEXT: %[[v3:.*]] = cmpi "sge", %[[v2]], %[[c0]] : index
+// CHECK-NEXT: %[[v3:.*]] = cmpi sge, %[[v2]], %[[c0]] : index
// CHECK-NEXT: if %[[v3]] {
// CHECK-NEXT: %[[c0_0:.*]] = constant 0 : index
// CHECK-NEXT: %[[cm10:.*]] = constant -10 : index
// CHECK-NEXT: %[[v4:.*]] = addi %[[v0]], %[[cm10]] : index
-// CHECK-NEXT: %[[v5:.*]] = cmpi "sge", %[[v4]], %[[c0_0]] : index
+// CHECK-NEXT: %[[v5:.*]] = cmpi sge, %[[v4]], %[[c0_0]] : index
// CHECK-NEXT: if %[[v5]] {
// CHECK-NEXT: call @body(%[[v0:.*]]) : (index) -> ()
// CHECK-NEXT: }
@@ -218,7 +218,7 @@ func @if_else() {
// CHECK-NEXT: %[[c0_0:.*]] = constant 0 : index
// CHECK-NEXT: %[[cm10:.*]] = constant -10 : index
// CHECK-NEXT: %{{.*}} = addi %[[v0]], %[[cm10]] : index
-// CHECK-NEXT: %{{.*}} = cmpi "sge", %{{.*}}, %[[c0_0]] : index
+// CHECK-NEXT: %{{.*}} = cmpi sge, %{{.*}}, %[[c0_0]] : index
// CHECK-NEXT: if %{{.*}} {
// CHECK-NEXT: call @mid(%[[v0:.*]]) : (index) -> ()
// CHECK-NEXT: }
@@ -249,22 +249,22 @@ func @nested_ifs() {
// CHECK-NEXT: %[[v2:.*]] = addi %[[v1]], %{{.*}} : index
// CHECK-NEXT: %[[c1:.*]] = constant 1 : index
// CHECK-NEXT: %[[v3:.*]] = addi %[[v2]], %[[c1]] : index
-// CHECK-NEXT: %[[v4:.*]] = cmpi "sge", %[[v3]], %[[c0]] : index
+// CHECK-NEXT: %[[v4:.*]] = cmpi sge, %[[v3]], %[[c0]] : index
// CHECK-NEXT: %[[cm1_0:.*]] = constant -1 : index
// CHECK-NEXT: %[[v5:.*]] = addi %{{.*}}, %[[cm1_0]] : index
-// CHECK-NEXT: %[[v6:.*]] = cmpi "sge", %[[v5]], %[[c0]] : index
+// CHECK-NEXT: %[[v6:.*]] = cmpi sge, %[[v5]], %[[c0]] : index
// CHECK-NEXT: %[[v7:.*]] = and %[[v4]], %[[v6]] : i1
// CHECK-NEXT: %[[cm1_1:.*]] = constant -1 : index
// CHECK-NEXT: %[[v8:.*]] = addi %{{.*}}, %[[cm1_1]] : index
-// CHECK-NEXT: %[[v9:.*]] = cmpi "sge", %[[v8]], %[[c0]] : index
+// CHECK-NEXT: %[[v9:.*]] = cmpi sge, %[[v8]], %[[c0]] : index
// CHECK-NEXT: %[[v10:.*]] = and %[[v7]], %[[v9]] : i1
// CHECK-NEXT: %[[cm1_2:.*]] = constant -1 : index
// CHECK-NEXT: %[[v11:.*]] = addi %{{.*}}, %[[cm1_2]] : index
-// CHECK-NEXT: %[[v12:.*]] = cmpi "sge", %[[v11]], %[[c0]] : index
+// CHECK-NEXT: %[[v12:.*]] = cmpi sge, %[[v11]], %[[c0]] : index
// CHECK-NEXT: %[[v13:.*]] = and %[[v10]], %[[v12]] : i1
// CHECK-NEXT: %[[cm42:.*]] = constant -42 : index
// CHECK-NEXT: %[[v14:.*]] = addi %{{.*}}, %[[cm42]] : index
-// CHECK-NEXT: %[[v15:.*]] = cmpi "eq", %[[v14]], %[[c0]] : index
+// CHECK-NEXT: %[[v15:.*]] = cmpi eq, %[[v14]], %[[c0]] : index
// CHECK-NEXT: %[[v16:.*]] = and %[[v13]], %[[v15]] : i1
// CHECK-NEXT: if %[[v16]] {
// CHECK-NEXT: call @body(%[[v0:.*]]) : (index) -> ()
@@ -292,7 +292,7 @@ func @if_for() {
// CHECK-NEXT: %[[v1:.*]] = muli %[[v0]], %[[cm1]] : index
// CHECK-NEXT: %[[c20:.*]] = constant 20 : index
// CHECK-NEXT: %[[v2:.*]] = addi %[[v1]], %[[c20]] : index
-// CHECK-NEXT: %[[v3:.*]] = cmpi "sge", %[[v2]], %[[c0]] : index
+// CHECK-NEXT: %[[v3:.*]] = cmpi sge, %[[v2]], %[[c0]] : index
// CHECK-NEXT: if %[[v3]] {
// CHECK-NEXT: %[[c0:.*]]{{.*}} = constant 0 : index
// CHECK-NEXT: %[[c42:.*]]{{.*}} = constant 42 : index
@@ -301,7 +301,7 @@ func @if_for() {
// CHECK-NEXT: %[[c0_:.*]]{{.*}} = constant 0 : index
// CHECK-NEXT: %[[cm10:.*]] = constant -10 : index
// CHECK-NEXT: %[[v4:.*]] = addi %{{.*}}, %[[cm10]] : index
-// CHECK-NEXT: %[[v5:.*]] = cmpi "sge", %[[v4]], %[[c0_:.*]]{{.*}} : index
+// CHECK-NEXT: %[[v5:.*]] = cmpi sge, %[[v4]], %[[c0_:.*]]{{.*}} : index
// CHECK-NEXT: if %[[v5]] {
// CHECK-NEXT: call @body2(%[[v0]], %{{.*}}) : (index, index) -> ()
affine.if #set1(%i) {
@@ -318,7 +318,7 @@ func @if_for() {
// CHECK-NEXT: %[[c0:.*]]{{.*}} = constant 0 : index
// CHECK-NEXT: %[[cm10:.*]]{{.*}} = constant -10 : index
// CHECK-NEXT: %{{.*}} = addi %{{.*}}, %[[cm10:.*]]{{.*}} : index
-// CHECK-NEXT: %{{.*}} = cmpi "sge", %{{.*}}, %[[c0:.*]]{{.*}} : index
+// CHECK-NEXT: %{{.*}} = cmpi sge, %{{.*}}, %[[c0:.*]]{{.*}} : index
// CHECK-NEXT: if %{{.*}} {
// CHECK-NEXT: %[[c0_:.*]]{{.*}} = constant 0 : index
// CHECK-NEXT: %[[c42_:.*]]{{.*}} = constant 42 : index
@@ -346,11 +346,11 @@ func @if_for() {
// CHECK-NEXT: %[[cm1:.*]] = constant -1 : index
// CHECK-NEXT: %[[a:.*]] = muli %{{.*}}, %[[cm1]] : index
// CHECK-NEXT: %[[b:.*]] = addi %[[a]], %{{.*}} : index
-// CHECK-NEXT: %[[c:.*]] = cmpi "sgt", %{{.*}}, %[[b]] : index
+// CHECK-NEXT: %[[c:.*]] = cmpi sgt, %{{.*}}, %[[b]] : index
// CHECK-NEXT: %[[d:.*]] = select %[[c]], %{{.*}}, %[[b]] : index
// CHECK-NEXT: %[[c10:.*]] = constant 10 : index
// CHECK-NEXT: %[[e:.*]] = addi %{{.*}}, %[[c10]] : index
-// CHECK-NEXT: %[[f:.*]] = cmpi "slt", %{{.*}}, %[[e]] : index
+// CHECK-NEXT: %[[f:.*]] = cmpi slt, %{{.*}}, %[[e]] : index
// CHECK-NEXT: %[[g:.*]] = select %[[f]], %{{.*}}, %[[e]] : index
// CHECK-NEXT: %[[c1_0:.*]] = constant 1 : index
// CHECK-NEXT: for %{{.*}} = %[[d]] to %[[g]] step %[[c1_0]] {
@@ -370,22 +370,22 @@ func @loop_min_max(%N : index) {
#map_7_values = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>
-// Check that the "min" (cmpi "slt" + select) reduction sequence is emitted
+// Check that the "min" (cmpi slt + select) reduction sequence is emitted
// correctly for an affine map with 7 results.
// CHECK-LABEL: func @min_reduction_tree
// CHECK-NEXT: %[[c0:.*]] = constant 0 : index
-// CHECK-NEXT: %[[c01:.+]] = cmpi "slt", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %[[c01:.+]] = cmpi slt, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: %[[r01:.+]] = select %[[c01]], %{{.*}}, %{{.*}} : index
-// CHECK-NEXT: %[[c012:.+]] = cmpi "slt", %[[r01]], %{{.*}} : index
+// CHECK-NEXT: %[[c012:.+]] = cmpi slt, %[[r01]], %{{.*}} : index
// CHECK-NEXT: %[[r012:.+]] = select %[[c012]], %[[r01]], %{{.*}} : index
-// CHECK-NEXT: %[[c0123:.+]] = cmpi "slt", %[[r012]], %{{.*}} : index
+// CHECK-NEXT: %[[c0123:.+]] = cmpi slt, %[[r012]], %{{.*}} : index
// CHECK-NEXT: %[[r0123:.+]] = select %[[c0123]], %[[r012]], %{{.*}} : index
-// CHECK-NEXT: %[[c01234:.+]] = cmpi "slt", %[[r0123]], %{{.*}} : index
+// CHECK-NEXT: %[[c01234:.+]] = cmpi slt, %[[r0123]], %{{.*}} : index
// CHECK-NEXT: %[[r01234:.+]] = select %[[c01234]], %[[r0123]], %{{.*}} : index
-// CHECK-NEXT: %[[c012345:.+]] = cmpi "slt", %[[r01234]], %{{.*}} : index
+// CHECK-NEXT: %[[c012345:.+]] = cmpi slt, %[[r01234]], %{{.*}} : index
// CHECK-NEXT: %[[r012345:.+]] = select %[[c012345]], %[[r01234]], %{{.*}} : index
-// CHECK-NEXT: %[[c0123456:.+]] = cmpi "slt", %[[r012345]], %{{.*}} : index
+// CHECK-NEXT: %[[c0123456:.+]] = cmpi slt, %[[r012345]], %{{.*}} : index
// CHECK-NEXT: %[[r0123456:.+]] = select %[[c0123456]], %[[r012345]], %{{.*}} : index
// CHECK-NEXT: %[[c1:.*]] = constant 1 : index
// CHECK-NEXT: for %{{.*}} = %[[c0]] to %[[r0123456]] step %[[c1]] {
@@ -478,7 +478,7 @@ func @affine_apply_mod(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[c42:.*]] = constant 42 : index
// CHECK-NEXT: %[[v0:.*]] = remi_signed %{{.*}}, %[[c42]] : index
// CHECK-NEXT: %[[c0:.*]] = constant 0 : index
-// CHECK-NEXT: %[[v1:.*]] = cmpi "slt", %[[v0]], %[[c0]] : index
+// CHECK-NEXT: %[[v1:.*]] = cmpi slt, %[[v0]], %[[c0]] : index
// CHECK-NEXT: %[[v2:.*]] = addi %[[v0]], %[[c42]] : index
// CHECK-NEXT: %[[v3:.*]] = select %[[v1]], %[[v2]], %[[v0]] : index
%0 = affine.apply #mapmod (%arg0)
@@ -497,7 +497,7 @@ func @affine_apply_floordiv(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[c42:.*]] = constant 42 : index
// CHECK-NEXT: %[[c0:.*]] = constant 0 : index
// CHECK-NEXT: %[[cm1:.*]] = constant -1 : index
-// CHECK-NEXT: %[[v0:.*]] = cmpi "slt", %{{.*}}, %[[c0]] : index
+// CHECK-NEXT: %[[v0:.*]] = cmpi slt, %{{.*}}, %[[c0]] : index
// CHECK-NEXT: %[[v1:.*]] = subi %[[cm1]], %{{.*}} : index
// CHECK-NEXT: %[[v2:.*]] = select %[[v0]], %[[v1]], %{{.*}} : index
// CHECK-NEXT: %[[v3:.*]] = divi_signed %[[v2]], %[[c42]] : index
@@ -519,7 +519,7 @@ func @affine_apply_ceildiv(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[c42:.*]] = constant 42 : index
// CHECK-NEXT: %[[c0:.*]] = constant 0 : index
// CHECK-NEXT: %[[c1:.*]] = constant 1 : index
-// CHECK-NEXT: %[[v0:.*]] = cmpi "sle", %{{.*}}, %[[c0]] : index
+// CHECK-NEXT: %[[v0:.*]] = cmpi sle, %{{.*}}, %[[c0]] : index
// CHECK-NEXT: %[[v1:.*]] = subi %[[c0]], %{{.*}} : index
// CHECK-NEXT: %[[v2:.*]] = subi %{{.*}}, %[[c1]] : index
// CHECK-NEXT: %[[v3:.*]] = select %[[v0]], %[[v1]], %[[v2]] : index
@@ -624,7 +624,7 @@ func @affine_min(%arg0: index, %arg1: index) -> index{
// CHECK: %[[Cm2:.*]] = constant -1
// CHECK: %[[neg2:.*]] = muli %[[ARG0]], %[[Cm2:.*]]
// CHECK: %[[second:.*]] = addi %[[ARG1]], %[[neg2]]
- // CHECK: %[[cmp:.*]] = cmpi "slt", %[[first]], %[[second]]
+ // CHECK: %[[cmp:.*]] = cmpi slt, %[[first]], %[[second]]
// CHECK: select %[[cmp]], %[[first]], %[[second]]
%0 = affine.min affine_map<(d0,d1) -> (d0 - d1, d1 - d0)>(%arg0, %arg1)
return %0 : index
@@ -639,7 +639,7 @@ func @affine_max(%arg0: index, %arg1: index) -> index{
// CHECK: %[[Cm2:.*]] = constant -1
// CHECK: %[[neg2:.*]] = muli %[[ARG0]], %[[Cm2:.*]]
// CHECK: %[[second:.*]] = addi %[[ARG1]], %[[neg2]]
- // CHECK: %[[cmp:.*]] = cmpi "sgt", %[[first]], %[[second]]
+ // CHECK: %[[cmp:.*]] = cmpi sgt, %[[first]], %[[second]]
// CHECK: select %[[cmp]], %[[first]], %[[second]]
%0 = affine.max affine_map<(d0,d1) -> (d0 - d1, d1 - d0)>(%arg0, %arg1)
return %0 : index
diff --git a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
index c0cba8c360ca..e81c4cd103a5 100644
--- a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
+++ b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
@@ -32,7 +32,7 @@ module attributes {
// CHECK: %[[ADD:.+]] = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[VAL]] : i32
// CHECK: %[[OUTPTR:.+]] = spv.AccessChain %[[OUTPUT]][%[[ZERO]], %[[ZERO]]]
-// CHECK: %[[ELECT:.+]] = spv.GroupNonUniformElect "Subgroup" : i1
+// CHECK: %[[ELECT:.+]] = spv.GroupNonUniformElect Subgroup : i1
// CHECK: spv.selection {
// CHECK: spv.BranchConditional %[[ELECT]], ^bb1, ^bb2
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index 2c75f99650ab..c1fc82e51c50 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -11,7 +11,7 @@ func @branch_loop() {
// CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}}
^bb1(%0: index, %1: index):
// CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64
- %2 = cmpi "slt", %0, %1 : index
+ %2 = cmpi slt, %0, %1 : index
// CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]]
cond_br %2, ^bb2(%end, %end : index, index), ^bb3
// CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64):
diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 2454ced8ac35..e72aabe3bbdb 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -284,10 +284,10 @@ module {
// CHECK: [[VAL_43:%.*]] = affine.min #[[$MAP4]]([[VAL_29]]){{\[}}[[VAL_42]]]
// CHECK: [[VAL_44:%.*]] = subview [[VAL_2]]{{\[}}[[VAL_28]], [[VAL_29]]] {{\[}}[[VAL_41]], [[VAL_43]]] {{\[}}%[[C1]], %[[C1]]] : memref<?x?xf32, #[[$MAP0]]> to memref<?x?xf32, #[[$MAP5]]>
// CHECK: [[VAL_45:%.*]] = affine.apply #[[$MAP2]]([[VAL_22]]){{\[}}%[[C1]], %[[C0]]]
-// CHECK: [[VAL_46:%.*]] = cmpi "slt", [[VAL_45]], [[VAL_31_SQUARED]] : index
+// CHECK: [[VAL_46:%.*]] = cmpi slt, [[VAL_45]], [[VAL_31_SQUARED]] : index
// CHECK: scf.if [[VAL_46]] {
// CHECK: [[VAL_47:%.*]] = affine.apply #[[$MAP2]]([[VAL_23]]){{\[}}%[[C1]], %[[C0]]]
-// CHECK: [[VAL_48:%.*]] = cmpi "slt", [[VAL_47]], [[VAL_33]] : index
+// CHECK: [[VAL_48:%.*]] = cmpi slt, [[VAL_47]], [[VAL_33]] : index
// CHECK: scf.if [[VAL_48]] {
// CHECK: [[VAL_49:%.*]] = load [[VAL_34]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
// CHECK: [[VAL_50:%.*]] = load [[VAL_39]]{{\[}}[[VAL_45]], [[VAL_47]]] : memref<?x?xf32, #[[$MAP5]]>
diff --git a/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir b/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir
index 67e0bb5f9739..793d7f706f24 100644
--- a/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir
+++ b/mlir/test/Conversion/SCFToStandard/convert-to-cfg.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: func @simple_std_for_loop(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
// CHECK-NEXT: br ^bb1(%{{.*}} : index)
// CHECK-NEXT: ^bb1(%{{.*}}: index): // 2 preds: ^bb0, ^bb2
-// CHECK-NEXT: %{{.*}} = cmpi "slt", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = cmpi slt, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: cond_br %{{.*}}, ^bb2, ^bb3
// CHECK-NEXT: ^bb2: // pred: ^bb1
// CHECK-NEXT: %{{.*}} = constant 1 : index
@@ -21,13 +21,13 @@ func @simple_std_for_loop(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-LABEL: func @simple_std_2_for_loops(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index) {
// CHECK-NEXT: br ^bb1(%{{.*}} : index)
// CHECK-NEXT: ^bb1(%[[ub0:.*]]: index): // 2 preds: ^bb0, ^bb5
-// CHECK-NEXT: %[[cond0:.*]] = cmpi "slt", %[[ub0]], %{{.*}} : index
+// CHECK-NEXT: %[[cond0:.*]] = cmpi slt, %[[ub0]], %{{.*}} : index
// CHECK-NEXT: cond_br %[[cond0]], ^bb2, ^bb6
// CHECK-NEXT: ^bb2: // pred: ^bb1
// CHECK-NEXT: %{{.*}} = constant 1 : index
// CHECK-NEXT: br ^bb3(%{{.*}} : index)
// CHECK-NEXT: ^bb3(%[[ub1:.*]]: index): // 2 preds: ^bb2, ^bb4
-// CHECK-NEXT: %[[cond1:.*]] = cmpi "slt", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %[[cond1:.*]] = cmpi slt, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: cond_br %[[cond1]], ^bb4, ^bb5
// CHECK-NEXT: ^bb4: // pred: ^bb3
// CHECK-NEXT: %{{.*}} = constant 1 : index
@@ -111,7 +111,7 @@ func @simple_std_2_ifs(%arg0: i1) {
// CHECK-LABEL: func @simple_std_for_loop_with_2_ifs(%{{.*}}: index, %{{.*}}: index, %{{.*}}: index, %{{.*}}: i1) {
// CHECK-NEXT: br ^bb1(%{{.*}} : index)
// CHECK-NEXT: ^bb1(%{{.*}}: index): // 2 preds: ^bb0, ^bb7
-// CHECK-NEXT: %{{.*}} = cmpi "slt", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = cmpi slt, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: cond_br %{{.*}}, ^bb2, ^bb8
// CHECK-NEXT: ^bb2: // pred: ^bb1
// CHECK-NEXT: %{{.*}} = constant 1 : index
@@ -230,12 +230,12 @@ func @nested_if_yield(%arg0: i1) -> (index) {
// CHECK: [[VAL_5:%.*]] = constant 1 : index
// CHECK: br ^bb1([[VAL_0]] : index)
// CHECK: ^bb1([[VAL_6:%.*]]: index):
-// CHECK: [[VAL_7:%.*]] = cmpi "slt", [[VAL_6]], [[VAL_2]] : index
+// CHECK: [[VAL_7:%.*]] = cmpi slt, [[VAL_6]], [[VAL_2]] : index
// CHECK: cond_br [[VAL_7]], ^bb2, ^bb6
// CHECK: ^bb2:
// CHECK: br ^bb3([[VAL_1]] : index)
// CHECK: ^bb3([[VAL_8:%.*]]: index):
-// CHECK: [[VAL_9:%.*]] = cmpi "slt", [[VAL_8]], [[VAL_3]] : index
+// CHECK: [[VAL_9:%.*]] = cmpi slt, [[VAL_8]], [[VAL_3]] : index
// CHECK: cond_br [[VAL_9]], ^bb4, ^bb5
// CHECK: ^bb4:
// CHECK: [[VAL_10:%.*]] = constant 1 : index
@@ -265,7 +265,7 @@ func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: br ^[[COND:.*]](%[[LB]], %[[INIT0]], %[[INIT1]] : index, f32, f32)
//
// CHECK: ^[[COND]](%[[ITER:.*]]: index, %[[ITER_ARG0:.*]]: f32, %[[ITER_ARG1:.*]]: f32):
-// CHECK: %[[CMP:.*]] = cmpi "slt", %[[ITER]], %[[UB]] : index
+// CHECK: %[[CMP:.*]] = cmpi slt, %[[ITER]], %[[UB]] : index
// CHECK: cond_br %[[CMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]]
//
// CHECK: ^[[BODY]]:
@@ -330,7 +330,7 @@ func @simple_parallel_reduce_loop(%arg0: index, %arg1: index,
// Condition branch takes as arguments the current value of the iteration
// variable and the current partially reduced value.
// CHECK: ^[[COND]](%[[ITER:.*]]: index, %[[ITER_ARG:.*]]: f32
- // CHECK: %[[COMP:.*]] = cmpi "slt", %[[ITER]], %[[UB]]
+ // CHECK: %[[COMP:.*]] = cmpi slt, %[[ITER]], %[[UB]]
// CHECK: cond_br %[[COMP]], ^[[BODY:.*]], ^[[CONTINUE:.*]]
// Bodies of scf.reduce operations are folded into the main loop body. The
@@ -551,7 +551,7 @@ func @nested_while_ops(%arg0: f32) -> i64 {
func @ifs_in_parallel(%arg1: index, %arg2: index, %arg3: index, %arg4: i1, %arg5: i1) {
// CHECK: br ^[[LOOP_LATCH:.*]](%[[ARG0]] : index)
// CHECK: ^[[LOOP_LATCH]](%[[LOOP_IV:.*]]: index):
- // CHECK: %[[LOOP_COND:.*]] = cmpi "slt", %[[LOOP_IV]], %[[ARG1]] : index
+ // CHECK: %[[LOOP_COND:.*]] = cmpi slt, %[[LOOP_IV]], %[[ARG1]] : index
// CHECK: cond_br %[[LOOP_COND]], ^[[LOOP_BODY:.*]], ^[[LOOP_CONT:.*]]
// CHECK: ^[[LOOP_BODY]]:
// CHECK: cond_br %[[ARG3]], ^[[IF1_THEN:.*]], ^[[IF1_CONT:.*]]
diff --git a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
index eb33f9e2bbea..688b9fbffba7 100644
--- a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir
@@ -9,7 +9,7 @@
// CHECK: %[[RET:.*]] = shape.const_witness true
// CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<?xindex>
// CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?xindex>
-// CHECK: %[[LHS_RANK_ULE:.*]] = cmpi "ule", %[[LHS_RANK]], %[[RHS_RANK]] : index
+// CHECK: %[[LHS_RANK_ULE:.*]] = cmpi ule, %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[LESSER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[GREATER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[RHS_RANK]], %[[LHS_RANK]] : index
// CHECK: %[[LESSER_RANK_OPERAND:.*]] = select %[[LHS_RANK_ULE]], %[[LHS]], %[[RHS]] : tensor<?xindex>
@@ -19,9 +19,9 @@
// CHECK: %[[GREATER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[GREATER_RANK_OPERAND]][%[[IV]]] : tensor<?xindex>
// CHECK: %[[IVSHIFTED:.*]] = subi %[[IV]], %[[RANK_DIFF]] : index
// CHECK: %[[LESSER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[LESSER_RANK_OPERAND]][%[[IVSHIFTED]]] : tensor<?xindex>
-// CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
-// CHECK: %[[LESSER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[LESSER_RANK_OPERAND_EXTENT]], %[[C1]] : index
-// CHECK: %[[EXTENTS_AGREE:.*]] = cmpi "eq", %[[GREATER_RANK_OPERAND_EXTENT]], %[[LESSER_RANK_OPERAND_EXTENT]] : index
+// CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi eq, %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
+// CHECK: %[[LESSER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi eq, %[[LESSER_RANK_OPERAND_EXTENT]], %[[C1]] : index
+// CHECK: %[[EXTENTS_AGREE:.*]] = cmpi eq, %[[GREATER_RANK_OPERAND_EXTENT]], %[[LESSER_RANK_OPERAND_EXTENT]] : index
// CHECK: %[[OR_TMP:.*]] = or %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE]], %[[LESSER_RANK_OPERAND_EXTENT_IS_ONE]] : i1
// CHECK: %[[BROADCAST_IS_VALID:.*]] = or %[[EXTENTS_AGREE]], %[[OR_TMP]] : i1
// CHECK: assert %[[BROADCAST_IS_VALID]], "invalid broadcast"
diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
index c2ab39f338b1..9f7a20ab9de6 100644
--- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
@@ -272,14 +272,14 @@ func @shape_eq(%a : tensor<?xindex>, %b : tensor<?xindex>) -> i1 {
// CHECK: %[[C0:.*]] = constant 0 : index
// CHECK: %[[RANK_A:.*]] = dim %[[A]], %[[C0]] : tensor<?xindex>
// CHECK: %[[RANK_B:.*]] = dim %[[B]], %[[C0]] : tensor<?xindex>
- // CHECK: %[[RANK_EQ:.*]] = cmpi "eq", %[[RANK_A]], %[[RANK_B]]
+ // CHECK: %[[RANK_EQ:.*]] = cmpi eq, %[[RANK_A]], %[[RANK_B]]
// CHECK: %[[SHAPE_EQ:.*]] = scf.if %[[RANK_EQ]] -> (i1) {
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: %[[INIT:.*]] = constant true
// CHECK: %[[SHAPE_EQ_INNER:.*]] = scf.for %[[I:.*]] = %[[C0]] to %[[RANK_A]] step %[[C1]] iter_args(%[[CONJ:.*]] = %[[INIT]]) -> (i1) {
// CHECK: %[[EXTENT_A:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xindex>
// CHECK: %[[EXTENT_B:.*]] = tensor.extract %[[B]][%[[I]]] : tensor<?xindex>
- // CHECK: %[[EXTENT_EQ:.*]] = cmpi "eq", %[[EXTENT_A]], %[[EXTENT_B]]
+ // CHECK: %[[EXTENT_EQ:.*]] = cmpi eq, %[[EXTENT_A]], %[[EXTENT_B]]
// CHECK: %[[CONJ_NEXT:.*]] = and %[[CONJ]], %[[EXTENT_EQ]]
// CHECK: scf.yield %[[CONJ_NEXT]] : i1
// CHECK: }
@@ -313,7 +313,7 @@ func @broadcast_unknown_extents(%a : tensor<?xindex>, %b : tensor<?xindex>) {
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<?xindex>
// CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?xindex>
- // CHECK: %[[LHS_RANK_ULE:.*]] = cmpi "ule", %[[LHS_RANK]], %[[RHS_RANK]] : index
+ // CHECK: %[[LHS_RANK_ULE:.*]] = cmpi ule, %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[LESSER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[GREATER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[RHS_RANK]], %[[LHS_RANK]] : index
// CHECK: %[[ERASED_LHS:.*]] = tensor.cast %[[LHS]] : tensor<?xindex> to tensor<?xindex>
@@ -323,14 +323,14 @@ func @broadcast_unknown_extents(%a : tensor<?xindex>, %b : tensor<?xindex>) {
// CHECK: %[[RANK_DIFF:.*]] = subi %[[GREATER_RANK]], %[[LESSER_RANK]] : index
// CHECK: %[[RESULT:.*]] = dynamic_tensor_from_elements %[[GREATER_RANK]] {
// CHECK: ^bb0(%[[OUTPUT_DIMENSION:.*]]: index):
- // CHECK: %[[IS_UNCHALLENGED_DIMENSION:.*]] = cmpi "ult", %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
+ // CHECK: %[[IS_UNCHALLENGED_DIMENSION:.*]] = cmpi ult, %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
// CHECK: %[[GREATER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[GREATER_RANK_OPERAND]][%[[OUTPUT_DIMENSION]]] : tensor<?xindex>
// CHECK: %[[OUTPUT_EXTENT:.*]] = scf.if %[[IS_UNCHALLENGED_DIMENSION]] -> (index) {
// CHECK: scf.yield %[[GREATER_RANK_OPERAND_EXTENT]] : index
// CHECK: } else {
// CHECK: %[[LESSER_RANK_OPERAND_DIMENSION:.*]] = subi %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
// CHECK: %[[LESSER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[LESSER_RANK_OPERAND]][%[[LESSER_RANK_OPERAND_DIMENSION]]] : tensor<?xindex>
- // CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
+ // CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi eq, %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
// CHECK: %[[BROADCASTED_EXTENT:.*]] = select %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE]], %[[LESSER_RANK_OPERAND_EXTENT]], %[[GREATER_RANK_OPERAND_EXTENT]] : index
// CHECK: scf.yield %[[BROADCASTED_EXTENT]] : index
// CHECK: }
@@ -353,7 +353,7 @@ func @broadcast_known_different_extents(%a : tensor<2xindex>, %b : tensor<3xinde
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<2xindex>
// CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<3xindex>
- // CHECK: %[[LHS_RANK_ULE:.*]] = cmpi "ule", %[[LHS_RANK]], %[[RHS_RANK]] : index
+ // CHECK: %[[LHS_RANK_ULE:.*]] = cmpi ule, %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[LESSER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[GREATER_RANK:.*]] = select %[[LHS_RANK_ULE]], %[[RHS_RANK]], %[[LHS_RANK]] : index
// CHECK: %[[ERASED_LHS:.*]] = tensor.cast %[[LHS]] : tensor<2xindex> to tensor<?xindex>
@@ -363,14 +363,14 @@ func @broadcast_known_different_extents(%a : tensor<2xindex>, %b : tensor<3xinde
// CHECK: %[[RANK_DIFF:.*]] = subi %[[GREATER_RANK]], %[[LESSER_RANK]] : index
// CHECK: %[[RESULT:.*]] = dynamic_tensor_from_elements %[[GREATER_RANK]] {
// CHECK: ^bb0(%[[OUTPUT_DIMENSION:.*]]: index):
- // CHECK: %[[IS_UNCHALLENGED_DIMENSION:.*]] = cmpi "ult", %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
+ // CHECK: %[[IS_UNCHALLENGED_DIMENSION:.*]] = cmpi ult, %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
// CHECK: %[[GREATER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[GREATER_RANK_OPERAND]][%[[OUTPUT_DIMENSION]]] : tensor<?xindex>
// CHECK: %[[OUTPUT_EXTENT:.*]] = scf.if %[[IS_UNCHALLENGED_DIMENSION]] -> (index) {
// CHECK: scf.yield %[[GREATER_RANK_OPERAND_EXTENT]] : index
// CHECK: } else {
// CHECK: %[[LESSER_RANK_OPERAND_DIMENSION:.*]] = subi %[[OUTPUT_DIMENSION]], %[[RANK_DIFF]] : index
// CHECK: %[[LESSER_RANK_OPERAND_EXTENT:.*]] = tensor.extract %[[LESSER_RANK_OPERAND]][%[[LESSER_RANK_OPERAND_DIMENSION]]] : tensor<?xindex>
- // CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
+ // CHECK: %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE:.*]] = cmpi eq, %[[GREATER_RANK_OPERAND_EXTENT]], %[[C1]] : index
// CHECK: %[[BROADCASTED_EXTENT:.*]] = select %[[GREATER_RANK_OPERAND_EXTENT_IS_ONE]], %[[LESSER_RANK_OPERAND_EXTENT]], %[[GREATER_RANK_OPERAND_EXTENT]] : index
// CHECK: scf.yield %[[BROADCASTED_EXTENT]] : index
// CHECK: }
@@ -397,7 +397,7 @@ func @try_is_broadcastable(%a : tensor<3xindex>, %b : tensor<?xindex>) -> i1 {
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<3xindex>
// CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?xindex>
-// CHECK: %[[LHS_SMALLER:.*]] = cmpi "ule", %[[LHS_RANK]], %[[RHS_RANK]] : index
+// CHECK: %[[LHS_SMALLER:.*]] = cmpi ule, %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[SMALLER_RANK:.*]] = select %[[LHS_SMALLER]], %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[LARGER_RANK:.*]] = select %[[LHS_SMALLER]], %[[RHS_RANK]], %[[LHS_RANK]] : index
// CHECK: %[[RANK_ERASED_LHS:.*]] = tensor.cast %[[LHS]] : tensor<3xindex> to tensor<?xindex>
@@ -408,11 +408,11 @@ func @try_is_broadcastable(%a : tensor<3xindex>, %b : tensor<?xindex>) -> i1 {
// CHECK: %[[TRUE:.*]] = constant true
// CHECK: %[[ALL_RESULT:.*]] = scf.for %[[I:.*]] = %[[RANK_DIFF]] to %[[LARGER_RANK]] step %[[C1]] iter_args(%[[ALL_SO_FAR:.*]] = %[[TRUE]]) -> (i1) {
// CHECK: %[[LARGER_EXTENT:.*]] = tensor.extract %[[LARGER_SHAPE]]{{\[}}%[[I]]] : tensor<?xindex>
-// CHECK: %[[LARGER_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[LARGER_EXTENT]], %[[C1]] : index
+// CHECK: %[[LARGER_EXTENT_IS_ONE:.*]] = cmpi eq, %[[LARGER_EXTENT]], %[[C1]] : index
// CHECK: %[[SMALLER_EXTENT_INDEX:.*]] = subi %[[I]], %[[RANK_DIFF]] : index
// CHECK: %[[SMALLER_EXTENT:.*]] = tensor.extract %[[SMALLER_SHAPE]]{{\[}}%[[SMALLER_EXTENT_INDEX]]] : tensor<?xindex>
-// CHECK: %[[SMALLER_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[SMALLER_EXTENT]], %[[C1]] : index
-// CHECK: %[[EXTENTS_ARE_EQUAL:.*]] = cmpi "eq", %[[LARGER_EXTENT]], %[[SMALLER_EXTENT]] : index
+// CHECK: %[[SMALLER_EXTENT_IS_ONE:.*]] = cmpi eq, %[[SMALLER_EXTENT]], %[[C1]] : index
+// CHECK: %[[EXTENTS_ARE_EQUAL:.*]] = cmpi eq, %[[LARGER_EXTENT]], %[[SMALLER_EXTENT]] : index
// CHECK: %[[EITHER_EXTENT_IS_ONE:.*]] = or %[[LARGER_EXTENT_IS_ONE]], %[[SMALLER_EXTENT_IS_ONE]] : i1
// CHECK: %[[OR_EXTENTS_ARE_EQUAL:.*]] = or %[[EITHER_EXTENT_IS_ONE]], %[[EXTENTS_ARE_EQUAL]] : i1
// CHECK: %[[NEW_ALL_SO_FAR:.*]] = and %[[ALL_SO_FAR]], %[[OR_EXTENTS_ARE_EQUAL]] : i1
@@ -435,7 +435,7 @@ func @broadcast(%a : tensor<?xindex>, %b : tensor<?xindex>) -> !shape.witness {
// CHECK: %[[C1:.*]] = constant 1 : index
// CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<?xindex>
// CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?xindex>
-// CHECK: %[[LHS_SMALLER:.*]] = cmpi "ule", %[[LHS_RANK]], %[[RHS_RANK]] : index
+// CHECK: %[[LHS_SMALLER:.*]] = cmpi ule, %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[SMALLER_RANK:.*]] = select %[[LHS_SMALLER]], %[[LHS_RANK]], %[[RHS_RANK]] : index
// CHECK: %[[LARGER_RANK:.*]] = select %[[LHS_SMALLER]], %[[RHS_RANK]], %[[LHS_RANK]] : index
// CHECK: %[[RANK_ERASED_LHS:.*]] = tensor.cast %[[LHS]] : tensor<?xindex> to tensor<?xindex>
@@ -446,11 +446,11 @@ func @broadcast(%a : tensor<?xindex>, %b : tensor<?xindex>) -> !shape.witness {
// CHECK: %[[TRUE:.*]] = constant true
// CHECK: %[[ALL_RESULT:.*]] = scf.for %[[VAL_16:.*]] = %[[RANK_DIFF]] to %[[LARGER_RANK]] step %[[C1]] iter_args(%[[ALL_SO_FAR:.*]] = %[[TRUE]]) -> (i1) {
// CHECK: %[[LARGER_EXTENT:.*]] = tensor.extract %[[LARGER_SHAPE]]{{\[}}%[[VAL_16]]] : tensor<?xindex>
-// CHECK: %[[LARGER_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[LARGER_EXTENT]], %[[C1]] : index
+// CHECK: %[[LARGER_EXTENT_IS_ONE:.*]] = cmpi eq, %[[LARGER_EXTENT]], %[[C1]] : index
// CHECK: %[[LHS_EXTENT_INDEX:.*]] = subi %[[VAL_16]], %[[RANK_DIFF]] : index
// CHECK: %[[SMALLER_EXTENT:.*]] = tensor.extract %[[SMALLER_SHAPE]]{{\[}}%[[LHS_EXTENT_INDEX]]] : tensor<?xindex>
-// CHECK: %[[SMALLER_EXTENT_IS_ONE:.*]] = cmpi "eq", %[[SMALLER_EXTENT]], %[[C1]] : index
-// CHECK: %[[EXTENTS_ARE_EQUAL:.*]] = cmpi "eq", %[[LARGER_EXTENT]], %[[SMALLER_EXTENT]] : index
+// CHECK: %[[SMALLER_EXTENT_IS_ONE:.*]] = cmpi eq, %[[SMALLER_EXTENT]], %[[C1]] : index
+// CHECK: %[[EXTENTS_ARE_EQUAL:.*]] = cmpi eq, %[[LARGER_EXTENT]], %[[SMALLER_EXTENT]] : index
// CHECK: %[[EITHER_EXTENT_IS_ONE:.*]] = or %[[LARGER_EXTENT_IS_ONE]], %[[SMALLER_EXTENT_IS_ONE]] : i1
// CHECK: %[[OR_EXTENTS_ARE_EQUAL:.*]] = or %[[EITHER_EXTENT_IS_ONE]], %[[EXTENTS_ARE_EQUAL]] : i1
// CHECK: %[[NEW_ALL_SO_FAR:.*]] = and %[[ALL_SO_FAR]], %[[OR_EXTENTS_ARE_EQUAL]] : i1
diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index 0b334031aba9..2f091fef29df 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -40,7 +40,7 @@ func @simple_loop() {
// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32
// CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
^bb2(%0: index): // 2 preds: ^bb1, ^bb3
- %1 = cmpi "slt", %0, %c42 : index
+ %1 = cmpi slt, %0, %c42 : index
cond_br %1, ^bb3, ^bb4
// CHECK: ^bb3: // pred: ^bb2
@@ -193,7 +193,7 @@ func @func_args(i32, i32) -> i32 {
// CHECK32-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i32
// CHECK32-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb4
^bb2(%0: index): // 2 preds: ^bb1, ^bb3
- %1 = cmpi "slt", %0, %c42 : index
+ %1 = cmpi slt, %0, %c42 : index
cond_br %1, ^bb3, ^bb4
// CHECK-NEXT: ^bb3: // pred: ^bb2
@@ -266,7 +266,7 @@ func @imperfectly_nested_loops() {
// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb3, ^bb8
^bb2(%0: index): // 2 preds: ^bb1, ^bb7
- %1 = cmpi "slt", %0, %c42 : index
+ %1 = cmpi slt, %0, %c42 : index
cond_br %1, ^bb3, ^bb8
// CHECK-NEXT: ^bb3:
@@ -289,7 +289,7 @@ func @imperfectly_nested_loops() {
// CHECK-NEXT: {{.*}} = llvm.icmp "slt" {{.*}}, {{.*}} : i64
// CHECK-NEXT: llvm.cond_br {{.*}}, ^bb6, ^bb7
^bb5(%2: index): // 2 preds: ^bb4, ^bb6
- %3 = cmpi "slt", %2, %c56 : index
+ %3 = cmpi slt, %2, %c56 : index
cond_br %3, ^bb6, ^bb7
// CHECK-NEXT: ^bb6: // pred: ^bb5
@@ -382,7 +382,7 @@ func @more_imperfectly_nested_loops() {
%c42 = constant 42 : index
br ^bb2(%c0 : index)
^bb2(%0: index): // 2 preds: ^bb1, ^bb11
- %1 = cmpi "slt", %0, %c42 : index
+ %1 = cmpi slt, %0, %c42 : index
cond_br %1, ^bb3, ^bb12
^bb3: // pred: ^bb2
call @pre(%0) : (index) -> ()
@@ -392,7 +392,7 @@ func @more_imperfectly_nested_loops() {
%c56 = constant 56 : index
br ^bb5(%c7 : index)
^bb5(%2: index): // 2 preds: ^bb4, ^bb6
- %3 = cmpi "slt", %2, %c56 : index
+ %3 = cmpi slt, %2, %c56 : index
cond_br %3, ^bb6, ^bb7
^bb6: // pred: ^bb5
call @body2(%0, %2) : (index, index) -> ()
@@ -407,7 +407,7 @@ func @more_imperfectly_nested_loops() {
%c37 = constant 37 : index
br ^bb9(%c18 : index)
^bb9(%5: index): // 2 preds: ^bb8, ^bb10
- %6 = cmpi "slt", %5, %c37 : index
+ %6 = cmpi slt, %5, %c37 : index
cond_br %6, ^bb10, ^bb11
^bb10: // pred: ^bb9
call @body3(%0, %5) : (index, index) -> ()
@@ -528,7 +528,7 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
// CHECK-NEXT: %1 = llvm.sub %arg2, %arg3 : i32
%1 = subi %arg2, %arg3: i32
// CHECK-NEXT: %2 = llvm.icmp "slt" %arg2, %1 : i32
- %2 = cmpi "slt", %arg2, %1 : i32
+ %2 = cmpi slt, %arg2, %1 : i32
// CHECK-NEXT: %3 = llvm.sdiv %arg2, %arg3 : i32
%3 = divi_signed %arg2, %arg3 : i32
// CHECK-NEXT: %4 = llvm.udiv %arg2, %arg3 : i32
@@ -808,20 +808,20 @@ func @fcmp(f32, f32) -> () {
// CHECK-NEXT: llvm.fcmp "une" %arg0, %arg1 : f32
// CHECK-NEXT: llvm.fcmp "uno" %arg0, %arg1 : f32
// CHECK-NEXT: llvm.return
- %1 = cmpf "oeq", %arg0, %arg1 : f32
- %2 = cmpf "ogt", %arg0, %arg1 : f32
- %3 = cmpf "oge", %arg0, %arg1 : f32
- %4 = cmpf "olt", %arg0, %arg1 : f32
- %5 = cmpf "ole", %arg0, %arg1 : f32
- %6 = cmpf "one", %arg0, %arg1 : f32
- %7 = cmpf "ord", %arg0, %arg1 : f32
- %8 = cmpf "ueq", %arg0, %arg1 : f32
- %9 = cmpf "ugt", %arg0, %arg1 : f32
- %10 = cmpf "uge", %arg0, %arg1 : f32
- %11 = cmpf "ult", %arg0, %arg1 : f32
- %12 = cmpf "ule", %arg0, %arg1 : f32
- %13 = cmpf "une", %arg0, %arg1 : f32
- %14 = cmpf "uno", %arg0, %arg1 : f32
+ %1 = cmpf oeq, %arg0, %arg1 : f32
+ %2 = cmpf ogt, %arg0, %arg1 : f32
+ %3 = cmpf oge, %arg0, %arg1 : f32
+ %4 = cmpf olt, %arg0, %arg1 : f32
+ %5 = cmpf ole, %arg0, %arg1 : f32
+ %6 = cmpf one, %arg0, %arg1 : f32
+ %7 = cmpf ord, %arg0, %arg1 : f32
+ %8 = cmpf ueq, %arg0, %arg1 : f32
+ %9 = cmpf ugt, %arg0, %arg1 : f32
+ %10 = cmpf uge, %arg0, %arg1 : f32
+ %11 = cmpf ult, %arg0, %arg1 : f32
+ %12 = cmpf ule, %arg0, %arg1 : f32
+ %13 = cmpf une, %arg0, %arg1 : f32
+ %14 = cmpf uno, %arg0, %arg1 : f32
return
}
@@ -1296,19 +1296,19 @@ func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4,
// CHECK-LABEL: func @atomic_rmw
func @atomic_rmw(%I : memref<10xi32>, %ival : i32, %F : memref<10xf32>, %fval : f32, %i : index) {
- atomic_rmw "assign" %fval, %F[%i] : (f32, memref<10xf32>) -> f32
+ atomic_rmw assign %fval, %F[%i] : (f32, memref<10xf32>) -> f32
// CHECK: llvm.atomicrmw xchg %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "addi" %ival, %I[%i] : (i32, memref<10xi32>) -> i32
+ atomic_rmw addi %ival, %I[%i] : (i32, memref<10xi32>) -> i32
// CHECK: llvm.atomicrmw add %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "maxs" %ival, %I[%i] : (i32, memref<10xi32>) -> i32
+ atomic_rmw maxs %ival, %I[%i] : (i32, memref<10xi32>) -> i32
// CHECK: llvm.atomicrmw max %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "mins" %ival, %I[%i] : (i32, memref<10xi32>) -> i32
+ atomic_rmw mins %ival, %I[%i] : (i32, memref<10xi32>) -> i32
// CHECK: llvm.atomicrmw min %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "maxu" %ival, %I[%i] : (i32, memref<10xi32>) -> i32
+ atomic_rmw maxu %ival, %I[%i] : (i32, memref<10xi32>) -> i32
// CHECK: llvm.atomicrmw umax %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "minu" %ival, %I[%i] : (i32, memref<10xi32>) -> i32
+ atomic_rmw minu %ival, %I[%i] : (i32, memref<10xi32>) -> i32
// CHECK: llvm.atomicrmw umin %{{.*}}, %{{.*}} acq_rel
- atomic_rmw "addf" %fval, %F[%i] : (f32, memref<10xf32>) -> f32
+ atomic_rmw addf %fval, %F[%i] : (f32, memref<10xf32>) -> f32
// CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} acq_rel
return
}
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
index 9e972c3a6c57..ef5fa156718b 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
@@ -263,29 +263,29 @@ func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
// CHECK-LABEL: @cmpf
func @cmpf(%arg0 : f32, %arg1 : f32) {
// CHECK: spv.FOrdEqual
- %1 = cmpf "oeq", %arg0, %arg1 : f32
+ %1 = cmpf oeq, %arg0, %arg1 : f32
// CHECK: spv.FOrdGreaterThan
- %2 = cmpf "ogt", %arg0, %arg1 : f32
+ %2 = cmpf ogt, %arg0, %arg1 : f32
// CHECK: spv.FOrdGreaterThanEqual
- %3 = cmpf "oge", %arg0, %arg1 : f32
+ %3 = cmpf oge, %arg0, %arg1 : f32
// CHECK: spv.FOrdLessThan
- %4 = cmpf "olt", %arg0, %arg1 : f32
+ %4 = cmpf olt, %arg0, %arg1 : f32
// CHECK: spv.FOrdLessThanEqual
- %5 = cmpf "ole", %arg0, %arg1 : f32
+ %5 = cmpf ole, %arg0, %arg1 : f32
// CHECK: spv.FOrdNotEqual
- %6 = cmpf "one", %arg0, %arg1 : f32
+ %6 = cmpf one, %arg0, %arg1 : f32
// CHECK: spv.FUnordEqual
- %7 = cmpf "ueq", %arg0, %arg1 : f32
+ %7 = cmpf ueq, %arg0, %arg1 : f32
// CHECK: spv.FUnordGreaterThan
- %8 = cmpf "ugt", %arg0, %arg1 : f32
+ %8 = cmpf ugt, %arg0, %arg1 : f32
// CHECK: spv.FUnordGreaterThanEqual
- %9 = cmpf "uge", %arg0, %arg1 : f32
+ %9 = cmpf uge, %arg0, %arg1 : f32
// CHECK: spv.FUnordLessThan
- %10 = cmpf "ult", %arg0, %arg1 : f32
+ %10 = cmpf ult, %arg0, %arg1 : f32
// CHECK: FUnordLessThanEqual
- %11 = cmpf "ule", %arg0, %arg1 : f32
+ %11 = cmpf ule, %arg0, %arg1 : f32
// CHECK: spv.FUnordNotEqual
- %12 = cmpf "une", %arg0, %arg1 : f32
+ %12 = cmpf une, %arg0, %arg1 : f32
return
}
@@ -296,43 +296,43 @@ func @cmpf(%arg0 : f32, %arg1 : f32) {
// CHECK-LABEL: @cmpi
func @cmpi(%arg0 : i32, %arg1 : i32) {
// CHECK: spv.IEqual
- %0 = cmpi "eq", %arg0, %arg1 : i32
+ %0 = cmpi eq, %arg0, %arg1 : i32
// CHECK: spv.INotEqual
- %1 = cmpi "ne", %arg0, %arg1 : i32
+ %1 = cmpi ne, %arg0, %arg1 : i32
// CHECK: spv.SLessThan
- %2 = cmpi "slt", %arg0, %arg1 : i32
+ %2 = cmpi slt, %arg0, %arg1 : i32
// CHECK: spv.SLessThanEqual
- %3 = cmpi "sle", %arg0, %arg1 : i32
+ %3 = cmpi sle, %arg0, %arg1 : i32
// CHECK: spv.SGreaterThan
- %4 = cmpi "sgt", %arg0, %arg1 : i32
+ %4 = cmpi sgt, %arg0, %arg1 : i32
// CHECK: spv.SGreaterThanEqual
- %5 = cmpi "sge", %arg0, %arg1 : i32
+ %5 = cmpi sge, %arg0, %arg1 : i32
// CHECK: spv.ULessThan
- %6 = cmpi "ult", %arg0, %arg1 : i32
+ %6 = cmpi ult, %arg0, %arg1 : i32
// CHECK: spv.ULessThanEqual
- %7 = cmpi "ule", %arg0, %arg1 : i32
+ %7 = cmpi ule, %arg0, %arg1 : i32
// CHECK: spv.UGreaterThan
- %8 = cmpi "ugt", %arg0, %arg1 : i32
+ %8 = cmpi ugt, %arg0, %arg1 : i32
// CHECK: spv.UGreaterThanEqual
- %9 = cmpi "uge", %arg0, %arg1 : i32
+ %9 = cmpi uge, %arg0, %arg1 : i32
return
}
// CHECK-LABEL: @boolcmpi
func @boolcmpi(%arg0 : i1, %arg1 : i1) {
// CHECK: spv.LogicalEqual
- %0 = cmpi "eq", %arg0, %arg1 : i1
+ %0 = cmpi eq, %arg0, %arg1 : i1
// CHECK: spv.LogicalNotEqual
- %1 = cmpi "ne", %arg0, %arg1 : i1
+ %1 = cmpi ne, %arg0, %arg1 : i1
return
}
// CHECK-LABEL: @vecboolcmpi
func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
// CHECK: spv.LogicalEqual
- %0 = cmpi "eq", %arg0, %arg1 : vector<4xi1>
+ %0 = cmpi eq, %arg0, %arg1 : vector<4xi1>
// CHECK: spv.LogicalNotEqual
- %1 = cmpi "ne", %arg0, %arg1 : vector<4xi1>
+ %1 = cmpi ne, %arg0, %arg1 : vector<4xi1>
return
}
@@ -699,7 +699,7 @@ module attributes {
// CHECK-LABEL: @select
func @select(%arg0 : i32, %arg1 : i32) {
- %0 = cmpi "sle", %arg0, %arg1 : i32
+ %0 = cmpi sle, %arg0, %arg1 : i32
// CHECK: spv.Select
%1 = select %0, %arg0, %arg1 : i32
return
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
index e5bb65aa4208..7f69638d749b 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
@@ -209,7 +209,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
// CHECK-DAG: %[[dim:.*]] = dim %[[A]], %[[C0]] : memref<?x?xf32>
// CHECK: affine.for %[[I:.*]] = 0 to 3 {
// CHECK: %[[add:.*]] = affine.apply #[[$MAP0]](%[[I]])[%[[base]]]
- // CHECK: %[[cond1:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
+ // CHECK: %[[cond1:.*]] = cmpi slt, %[[add]], %[[dim]] : index
// CHECK: scf.if %[[cond1]] {
// CHECK: %[[vec_1d:.*]] = vector.transfer_read %[[A]][%[[add]], %[[base]]], %[[cst]] : memref<?x?xf32>, vector<15xf32>
// CHECK: store %[[vec_1d]], %[[alloc]][%[[I]]] : memref<3xvector<15xf32>>
@@ -224,7 +224,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
// FULL-UNROLL: %[[C0:.*]] = constant 0 : index
// FULL-UNROLL: %[[SPLAT:.*]] = constant dense<7.000000e+00> : vector<15xf32>
// FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %[[C0]] : memref<?x?xf32>
- // FULL-UNROLL: cmpi "slt", %[[base]], %[[DIM]] : index
+ // FULL-UNROLL: cmpi slt, %[[base]], %[[DIM]] : index
// FULL-UNROLL: %[[VEC1:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
// FULL-UNROLL: vector.transfer_read %[[A]][%[[base]], %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
// FULL-UNROLL: vector.insert %{{.*}}, %[[VEC0]] [0] : vector<15xf32> into vector<3x15xf32>
@@ -234,7 +234,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
// FULL-UNROLL: scf.yield %{{.*}} : vector<3x15xf32>
// FULL-UNROLL: }
// FULL-UNROLL: affine.apply #[[$MAP1]]()[%[[base]]]
- // FULL-UNROLL: cmpi "slt", %{{.*}}, %[[DIM]] : index
+ // FULL-UNROLL: cmpi slt, %{{.*}}, %[[DIM]] : index
// FULL-UNROLL: %[[VEC2:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
// FULL-UNROLL: vector.transfer_read %[[A]][%{{.*}}, %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
// FULL-UNROLL: vector.insert %{{.*}}, %[[VEC1]] [1] : vector<15xf32> into vector<3x15xf32>
@@ -244,7 +244,7 @@ func @transfer_read_progressive(%A : memref<?x?xf32>, %base: index) -> vector<3x
// FULL-UNROLL: scf.yield %{{.*}} : vector<3x15xf32>
// FULL-UNROLL: }
// FULL-UNROLL: affine.apply #[[$MAP2]]()[%[[base]]]
- // FULL-UNROLL: cmpi "slt", %{{.*}}, %[[DIM]] : index
+ // FULL-UNROLL: cmpi slt, %{{.*}}, %[[DIM]] : index
// FULL-UNROLL: %[[VEC3:.*]] = scf.if %{{.*}} -> (vector<3x15xf32>) {
// FULL-UNROLL: vector.transfer_read %[[A]][%{{.*}}, %[[base]]], %[[pad]] : memref<?x?xf32>, vector<15xf32>
// FULL-UNROLL: vector.insert %{{.*}}, %[[VEC2]] [2] : vector<15xf32> into vector<3x15xf32>
@@ -283,7 +283,7 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
// CHECK: %[[dim:.*]] = dim %[[A]], %[[C0]] : memref<?x?xf32>
// CHECK: affine.for %[[I:.*]] = 0 to 3 {
// CHECK: %[[add:.*]] = affine.apply #[[$MAP0]](%[[I]])[%[[base]]]
- // CHECK: %[[cmp:.*]] = cmpi "slt", %[[add]], %[[dim]] : index
+ // CHECK: %[[cmp:.*]] = cmpi slt, %[[add]], %[[dim]] : index
// CHECK: scf.if %[[cmp]] {
// CHECK: %[[vec_1d:.*]] = load %0[%[[I]]] : memref<3xvector<15xf32>>
// CHECK: vector.transfer_write %[[vec_1d]], %[[A]][%[[add]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
@@ -291,19 +291,19 @@ func @transfer_write_progressive(%A : memref<?x?xf32>, %base: index, %vec: vecto
// FULL-UNROLL: %[[C0:.*]] = constant 0 : index
// FULL-UNROLL: %[[DIM:.*]] = dim %[[A]], %[[C0]] : memref<?x?xf32>
- // FULL-UNROLL: %[[CMP0:.*]] = cmpi "slt", %[[base]], %[[DIM]] : index
+ // FULL-UNROLL: %[[CMP0:.*]] = cmpi slt, %[[base]], %[[DIM]] : index
// FULL-UNROLL: scf.if %[[CMP0]] {
// FULL-UNROLL: %[[V0:.*]] = vector.extract %[[vec]][0] : vector<3x15xf32>
// FULL-UNROLL: vector.transfer_write %[[V0]], %[[A]][%[[base]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
// FULL-UNROLL: }
// FULL-UNROLL: %[[I1:.*]] = affine.apply #[[$MAP1]]()[%[[base]]]
- // FULL-UNROLL: %[[CMP1:.*]] = cmpi "slt", %[[I1]], %[[DIM]] : index
+ // FULL-UNROLL: %[[CMP1:.*]] = cmpi slt, %[[I1]], %[[DIM]] : index
// FULL-UNROLL: scf.if %[[CMP1]] {
// FULL-UNROLL: %[[V1:.*]] = vector.extract %[[vec]][1] : vector<3x15xf32>
// FULL-UNROLL: vector.transfer_write %[[V1]], %[[A]][%[[I1]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
// FULL-UNROLL: }
// FULL-UNROLL: %[[I2:.*]] = affine.apply #[[$MAP2]]()[%[[base]]]
- // FULL-UNROLL: %[[CMP2:.*]] = cmpi "slt", %[[I2]], %[[DIM]] : index
+ // FULL-UNROLL: %[[CMP2:.*]] = cmpi slt, %[[I2]], %[[DIM]] : index
// FULL-UNROLL: scf.if %[[CMP2]] {
// FULL-UNROLL: %[[V2:.*]] = vector.extract %[[vec]][2] : vector<3x15xf32>
// FULL-UNROLL: vector.transfer_write %[[V2]], %[[A]][%[[I2]], %[[base]]] : vector<15xf32>, memref<?x?xf32>
@@ -387,7 +387,7 @@ func @transfer_read_minor_identity(%A : memref<?x?x?x?xf32>) -> vector<3x3xf32>
// CHECK: %[[m:.*]] = alloca() : memref<3xvector<3xf32>>
// CHECK: %[[d:.*]] = dim %[[A]], %[[c2]] : memref<?x?x?x?xf32>
// CHECK: affine.for %[[arg1:.*]] = 0 to 3 {
-// CHECK: %[[cmp:.*]] = cmpi "slt", %[[arg1]], %[[d]] : index
+// CHECK: %[[cmp:.*]] = cmpi slt, %[[arg1]], %[[d]] : index
// CHECK: scf.if %[[cmp]] {
// CHECK: %[[tr:.*]] = vector.transfer_read %[[A]][%[[c0]], %[[c0]], %[[arg1]], %[[c0]]], %[[cst]] : memref<?x?x?x?xf32>, vector<3xf32>
// CHECK: store %[[tr]], %[[m]][%[[arg1]]] : memref<3xvector<3xf32>>
@@ -418,7 +418,7 @@ func @transfer_write_minor_identity(%A : vector<3x3xf32>, %B : memref<?x?x?x?xf3
// CHECK: store %[[A]], %[[cast]][] : memref<vector<3x3xf32>>
// CHECK: %[[d:.*]] = dim %[[B]], %[[c2]] : memref<?x?x?x?xf32>
// CHECK: affine.for %[[arg2:.*]] = 0 to 3 {
-// CHECK: %[[cmp:.*]] = cmpi "slt", %[[arg2]], %[[d]] : index
+// CHECK: %[[cmp:.*]] = cmpi slt, %[[arg2]], %[[d]] : index
// CHECK: scf.if %[[cmp]] {
// CHECK: %[[tmp:.*]] = load %[[m]][%[[arg2]]] : memref<3xvector<3xf32>>
// CHECK: vector.transfer_write %[[tmp]], %[[B]][%[[c0]], %[[c0]], %[[arg2]], %[[c0]]] : vector<3xf32>, memref<?x?x?x?xf32>
diff --git a/mlir/test/Dialect/Affine/parallelize.mlir b/mlir/test/Dialect/Affine/parallelize.mlir
index 08aaa7f32fc8..3f7a79daf91d 100644
--- a/mlir/test/Dialect/Affine/parallelize.mlir
+++ b/mlir/test/Dialect/Affine/parallelize.mlir
@@ -25,7 +25,7 @@ func @reduce_window_max() {
affine.for %arg7 = 0 to 1 {
%2 = affine.load %0[%arg0, %arg1, %arg2, %arg3] : memref<1x8x8x64xf32>
%3 = affine.load %1[%arg0 + %arg4, %arg1 * 2 + %arg5, %arg2 * 2 + %arg6, %arg3 + %arg7] : memref<1x18x18x64xf32>
- %4 = cmpf "ogt", %2, %3 : f32
+ %4 = cmpf ogt, %2, %3 : f32
%5 = select %4, %2, %3 : f32
affine.store %5, %0[%arg0, %arg1, %arg2, %arg3] : memref<1x8x8x64xf32>
}
@@ -61,7 +61,7 @@ func @reduce_window_max() {
// CHECK: affine.parallel (%[[a7:.*]]) = (0) to (1) {
// CHECK: %[[lhs:.*]] = affine.load %[[v0]][%[[a0]], %[[a1]], %[[a2]], %[[a3]]] : memref<1x8x8x64xf32>
// CHECK: %[[rhs:.*]] = affine.load %[[v1]][%[[a0]] + %[[a4]], %[[a1]] * 2 + %[[a5]], %[[a2]] * 2 + %[[a6]], %[[a3]] + %[[a7]]] : memref<1x18x18x64xf32>
-// CHECK: %[[res:.*]] = cmpf "ogt", %[[lhs]], %[[rhs]] : f32
+// CHECK: %[[res:.*]] = cmpf ogt, %[[lhs]], %[[rhs]] : f32
// CHECK: %[[sel:.*]] = select %[[res]], %[[lhs]], %[[rhs]] : f32
// CHECK: affine.store %[[sel]], %[[v0]][%[[a0]], %[[a1]], %[[a2]], %[[a3]]] : memref<1x8x8x64xf32>
// CHECK: }
diff --git a/mlir/test/Dialect/GPU/all-reduce-max.mlir b/mlir/test/Dialect/GPU/all-reduce-max.mlir
index fb5fcafc3536..04cb68aaf7b7 100644
--- a/mlir/test/Dialect/GPU/all-reduce-max.mlir
+++ b/mlir/test/Dialect/GPU/all-reduce-max.mlir
@@ -35,16 +35,16 @@ gpu.module @kernels {
// CHECK: [[VAL_27:%.*]] = addi [[VAL_25]], [[VAL_18]] : i32
// CHECK: [[VAL_28:%.*]] = muli [[VAL_26]], [[VAL_16]] : i32
// CHECK: [[VAL_29:%.*]] = and [[VAL_27]], [[VAL_2]] : i32
- // CHECK: [[VAL_30:%.*]] = cmpi "eq", [[VAL_29]], [[VAL_3]] : i32
+ // CHECK: [[VAL_30:%.*]] = cmpi eq, [[VAL_29]], [[VAL_3]] : i32
// CHECK: [[VAL_31:%.*]] = subi [[VAL_27]], [[VAL_29]] : i32
// CHECK: [[VAL_32:%.*]] = subi [[VAL_28]], [[VAL_31]] : i32
- // CHECK: [[VAL_33:%.*]] = cmpi "slt", [[VAL_32]], [[VAL_5]] : i32
+ // CHECK: [[VAL_33:%.*]] = cmpi slt, [[VAL_32]], [[VAL_5]] : i32
// CHECK: cond_br [[VAL_33]], ^bb1, ^bb17
// CHECK: ^bb1:
// CHECK: [[VAL_34:%.*]], [[VAL_35:%.*]] = gpu.shuffle [[VAL_0]], [[VAL_6]], [[VAL_32]] xor : f32
// CHECK: cond_br [[VAL_35]], ^bb2, ^bb3
// CHECK: ^bb2:
- // CHECK: [[VAL_36:%.*]] = cmpf "ugt", [[VAL_0]], [[VAL_34]] : f32
+ // CHECK: [[VAL_36:%.*]] = cmpf ugt, [[VAL_0]], [[VAL_34]] : f32
// CHECK: [[VAL_37:%.*]] = select [[VAL_36]], [[VAL_0]], [[VAL_34]] : f32
// CHECK: br ^bb4([[VAL_37]] : f32)
// CHECK: ^bb3:
@@ -53,7 +53,7 @@ gpu.module @kernels {
// CHECK: [[VAL_39:%.*]], [[VAL_40:%.*]] = gpu.shuffle [[VAL_38]], [[VAL_7]], [[VAL_32]] xor : f32
// CHECK: cond_br [[VAL_40]], ^bb5, ^bb6
// CHECK: ^bb5:
- // CHECK: [[VAL_41:%.*]] = cmpf "ugt", [[VAL_38]], [[VAL_39]] : f32
+ // CHECK: [[VAL_41:%.*]] = cmpf ugt, [[VAL_38]], [[VAL_39]] : f32
// CHECK: [[VAL_42:%.*]] = select [[VAL_41]], [[VAL_38]], [[VAL_39]] : f32
// CHECK: br ^bb7([[VAL_42]] : f32)
// CHECK: ^bb6:
@@ -62,7 +62,7 @@ gpu.module @kernels {
// CHECK: [[VAL_44:%.*]], [[VAL_45:%.*]] = gpu.shuffle [[VAL_43]], [[VAL_8]], [[VAL_32]] xor : f32
// CHECK: cond_br [[VAL_45]], ^bb8, ^bb9
// CHECK: ^bb8:
- // CHECK: [[VAL_46:%.*]] = cmpf "ugt", [[VAL_43]], [[VAL_44]] : f32
+ // CHECK: [[VAL_46:%.*]] = cmpf ugt, [[VAL_43]], [[VAL_44]] : f32
// CHECK: [[VAL_47:%.*]] = select [[VAL_46]], [[VAL_43]], [[VAL_44]] : f32
// CHECK: br ^bb10([[VAL_47]] : f32)
// CHECK: ^bb9:
@@ -71,7 +71,7 @@ gpu.module @kernels {
// CHECK: [[VAL_49:%.*]], [[VAL_50:%.*]] = gpu.shuffle [[VAL_48]], [[VAL_9]], [[VAL_32]] xor : f32
// CHECK: cond_br [[VAL_50]], ^bb11, ^bb12
// CHECK: ^bb11:
- // CHECK: [[VAL_51:%.*]] = cmpf "ugt", [[VAL_48]], [[VAL_49]] : f32
+ // CHECK: [[VAL_51:%.*]] = cmpf ugt, [[VAL_48]], [[VAL_49]] : f32
// CHECK: [[VAL_52:%.*]] = select [[VAL_51]], [[VAL_48]], [[VAL_49]] : f32
// CHECK: br ^bb13([[VAL_52]] : f32)
// CHECK: ^bb12:
@@ -80,7 +80,7 @@ gpu.module @kernels {
// CHECK: [[VAL_54:%.*]], [[VAL_55:%.*]] = gpu.shuffle [[VAL_53]], [[VAL_10]], [[VAL_32]] xor : f32
// CHECK: cond_br [[VAL_55]], ^bb14, ^bb15
// CHECK: ^bb14:
- // CHECK: [[VAL_56:%.*]] = cmpf "ugt", [[VAL_53]], [[VAL_54]] : f32
+ // CHECK: [[VAL_56:%.*]] = cmpf ugt, [[VAL_53]], [[VAL_54]] : f32
// CHECK: [[VAL_57:%.*]] = select [[VAL_56]], [[VAL_53]], [[VAL_54]] : f32
// CHECK: br ^bb16([[VAL_57]] : f32)
// CHECK: ^bb15:
@@ -89,19 +89,19 @@ gpu.module @kernels {
// CHECK: br ^bb18([[VAL_58]] : f32)
// CHECK: ^bb17:
// CHECK: [[VAL_59:%.*]], [[VAL_60:%.*]] = gpu.shuffle [[VAL_0]], [[VAL_6]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_61:%.*]] = cmpf "ugt", [[VAL_0]], [[VAL_59]] : f32
+ // CHECK: [[VAL_61:%.*]] = cmpf ugt, [[VAL_0]], [[VAL_59]] : f32
// CHECK: [[VAL_62:%.*]] = select [[VAL_61]], [[VAL_0]], [[VAL_59]] : f32
// CHECK: [[VAL_63:%.*]], [[VAL_64:%.*]] = gpu.shuffle [[VAL_62]], [[VAL_7]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_65:%.*]] = cmpf "ugt", [[VAL_62]], [[VAL_63]] : f32
+ // CHECK: [[VAL_65:%.*]] = cmpf ugt, [[VAL_62]], [[VAL_63]] : f32
// CHECK: [[VAL_66:%.*]] = select [[VAL_65]], [[VAL_62]], [[VAL_63]] : f32
// CHECK: [[VAL_67:%.*]], [[VAL_68:%.*]] = gpu.shuffle [[VAL_66]], [[VAL_8]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_69:%.*]] = cmpf "ugt", [[VAL_66]], [[VAL_67]] : f32
+ // CHECK: [[VAL_69:%.*]] = cmpf ugt, [[VAL_66]], [[VAL_67]] : f32
// CHECK: [[VAL_70:%.*]] = select [[VAL_69]], [[VAL_66]], [[VAL_67]] : f32
// CHECK: [[VAL_71:%.*]], [[VAL_72:%.*]] = gpu.shuffle [[VAL_70]], [[VAL_9]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_73:%.*]] = cmpf "ugt", [[VAL_70]], [[VAL_71]] : f32
+ // CHECK: [[VAL_73:%.*]] = cmpf ugt, [[VAL_70]], [[VAL_71]] : f32
// CHECK: [[VAL_74:%.*]] = select [[VAL_73]], [[VAL_70]], [[VAL_71]] : f32
// CHECK: [[VAL_75:%.*]], [[VAL_76:%.*]] = gpu.shuffle [[VAL_74]], [[VAL_10]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_77:%.*]] = cmpf "ugt", [[VAL_74]], [[VAL_75]] : f32
+ // CHECK: [[VAL_77:%.*]] = cmpf ugt, [[VAL_74]], [[VAL_75]] : f32
// CHECK: [[VAL_78:%.*]] = select [[VAL_77]], [[VAL_74]], [[VAL_75]] : f32
// CHECK: br ^bb18([[VAL_78]] : f32)
// CHECK: ^bb18([[VAL_79:%.*]]: f32):
@@ -117,18 +117,18 @@ gpu.module @kernels {
// CHECK: gpu.barrier
// CHECK: [[VAL_82:%.*]] = addi [[VAL_28]], [[VAL_2]] : i32
// CHECK: [[VAL_83:%.*]] = divi_signed [[VAL_82]], [[VAL_5]] : i32
- // CHECK: [[VAL_84:%.*]] = cmpi "slt", [[VAL_27]], [[VAL_83]] : i32
+ // CHECK: [[VAL_84:%.*]] = cmpi slt, [[VAL_27]], [[VAL_83]] : i32
// CHECK: cond_br [[VAL_84]], ^bb22, ^bb41
// CHECK: ^bb22:
// CHECK: [[VAL_85:%.*]] = index_cast [[VAL_27]] : i32 to index
// CHECK: [[VAL_86:%.*]] = load [[VAL_1]]{{\[}}[[VAL_85]]] : memref<32xf32, 3>
- // CHECK: [[VAL_87:%.*]] = cmpi "slt", [[VAL_83]], [[VAL_5]] : i32
+ // CHECK: [[VAL_87:%.*]] = cmpi slt, [[VAL_83]], [[VAL_5]] : i32
// CHECK: cond_br [[VAL_87]], ^bb23, ^bb39
// CHECK: ^bb23:
// CHECK: [[VAL_88:%.*]], [[VAL_89:%.*]] = gpu.shuffle [[VAL_86]], [[VAL_6]], [[VAL_83]] xor : f32
// CHECK: cond_br [[VAL_89]], ^bb24, ^bb25
// CHECK: ^bb24:
- // CHECK: [[VAL_90:%.*]] = cmpf "ugt", [[VAL_86]], [[VAL_88]] : f32
+ // CHECK: [[VAL_90:%.*]] = cmpf ugt, [[VAL_86]], [[VAL_88]] : f32
// CHECK: [[VAL_91:%.*]] = select [[VAL_90]], [[VAL_86]], [[VAL_88]] : f32
// CHECK: br ^bb26([[VAL_91]] : f32)
// CHECK: ^bb25:
@@ -137,7 +137,7 @@ gpu.module @kernels {
// CHECK: [[VAL_93:%.*]], [[VAL_94:%.*]] = gpu.shuffle [[VAL_92]], [[VAL_7]], [[VAL_83]] xor : f32
// CHECK: cond_br [[VAL_94]], ^bb27, ^bb28
// CHECK: ^bb27:
- // CHECK: [[VAL_95:%.*]] = cmpf "ugt", [[VAL_92]], [[VAL_93]] : f32
+ // CHECK: [[VAL_95:%.*]] = cmpf ugt, [[VAL_92]], [[VAL_93]] : f32
// CHECK: [[VAL_96:%.*]] = select [[VAL_95]], [[VAL_92]], [[VAL_93]] : f32
// CHECK: br ^bb29([[VAL_96]] : f32)
// CHECK: ^bb28:
@@ -146,7 +146,7 @@ gpu.module @kernels {
// CHECK: [[VAL_98:%.*]], [[VAL_99:%.*]] = gpu.shuffle [[VAL_97]], [[VAL_8]], [[VAL_83]] xor : f32
// CHECK: cond_br [[VAL_99]], ^bb30, ^bb31
// CHECK: ^bb30:
- // CHECK: [[VAL_100:%.*]] = cmpf "ugt", [[VAL_97]], [[VAL_98]] : f32
+ // CHECK: [[VAL_100:%.*]] = cmpf ugt, [[VAL_97]], [[VAL_98]] : f32
// CHECK: [[VAL_101:%.*]] = select [[VAL_100]], [[VAL_97]], [[VAL_98]] : f32
// CHECK: br ^bb32([[VAL_101]] : f32)
// CHECK: ^bb31:
@@ -155,7 +155,7 @@ gpu.module @kernels {
// CHECK: [[VAL_103:%.*]], [[VAL_104:%.*]] = gpu.shuffle [[VAL_102]], [[VAL_9]], [[VAL_83]] xor : f32
// CHECK: cond_br [[VAL_104]], ^bb33, ^bb34
// CHECK: ^bb33:
- // CHECK: [[VAL_105:%.*]] = cmpf "ugt", [[VAL_102]], [[VAL_103]] : f32
+ // CHECK: [[VAL_105:%.*]] = cmpf ugt, [[VAL_102]], [[VAL_103]] : f32
// CHECK: [[VAL_106:%.*]] = select [[VAL_105]], [[VAL_102]], [[VAL_103]] : f32
// CHECK: br ^bb35([[VAL_106]] : f32)
// CHECK: ^bb34:
@@ -164,7 +164,7 @@ gpu.module @kernels {
// CHECK: [[VAL_108:%.*]], [[VAL_109:%.*]] = gpu.shuffle [[VAL_107]], [[VAL_10]], [[VAL_83]] xor : f32
// CHECK: cond_br [[VAL_109]], ^bb36, ^bb37
// CHECK: ^bb36:
- // CHECK: [[VAL_110:%.*]] = cmpf "ugt", [[VAL_107]], [[VAL_108]] : f32
+ // CHECK: [[VAL_110:%.*]] = cmpf ugt, [[VAL_107]], [[VAL_108]] : f32
// CHECK: [[VAL_111:%.*]] = select [[VAL_110]], [[VAL_107]], [[VAL_108]] : f32
// CHECK: br ^bb38([[VAL_111]] : f32)
// CHECK: ^bb37:
@@ -173,19 +173,19 @@ gpu.module @kernels {
// CHECK: br ^bb40([[VAL_112]] : f32)
// CHECK: ^bb39:
// CHECK: [[VAL_113:%.*]], [[VAL_114:%.*]] = gpu.shuffle [[VAL_86]], [[VAL_6]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_115:%.*]] = cmpf "ugt", [[VAL_86]], [[VAL_113]] : f32
+ // CHECK: [[VAL_115:%.*]] = cmpf ugt, [[VAL_86]], [[VAL_113]] : f32
// CHECK: [[VAL_116:%.*]] = select [[VAL_115]], [[VAL_86]], [[VAL_113]] : f32
// CHECK: [[VAL_117:%.*]], [[VAL_118:%.*]] = gpu.shuffle [[VAL_116]], [[VAL_7]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_119:%.*]] = cmpf "ugt", [[VAL_116]], [[VAL_117]] : f32
+ // CHECK: [[VAL_119:%.*]] = cmpf ugt, [[VAL_116]], [[VAL_117]] : f32
// CHECK: [[VAL_120:%.*]] = select [[VAL_119]], [[VAL_116]], [[VAL_117]] : f32
// CHECK: [[VAL_121:%.*]], [[VAL_122:%.*]] = gpu.shuffle [[VAL_120]], [[VAL_8]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_123:%.*]] = cmpf "ugt", [[VAL_120]], [[VAL_121]] : f32
+ // CHECK: [[VAL_123:%.*]] = cmpf ugt, [[VAL_120]], [[VAL_121]] : f32
// CHECK: [[VAL_124:%.*]] = select [[VAL_123]], [[VAL_120]], [[VAL_121]] : f32
// CHECK: [[VAL_125:%.*]], [[VAL_126:%.*]] = gpu.shuffle [[VAL_124]], [[VAL_9]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_127:%.*]] = cmpf "ugt", [[VAL_124]], [[VAL_125]] : f32
+ // CHECK: [[VAL_127:%.*]] = cmpf ugt, [[VAL_124]], [[VAL_125]] : f32
// CHECK: [[VAL_128:%.*]] = select [[VAL_127]], [[VAL_124]], [[VAL_125]] : f32
// CHECK: [[VAL_129:%.*]], [[VAL_130:%.*]] = gpu.shuffle [[VAL_128]], [[VAL_10]], [[VAL_5]] xor : f32
- // CHECK: [[VAL_131:%.*]] = cmpf "ugt", [[VAL_128]], [[VAL_129]] : f32
+ // CHECK: [[VAL_131:%.*]] = cmpf ugt, [[VAL_128]], [[VAL_129]] : f32
// CHECK: [[VAL_132:%.*]] = select [[VAL_131]], [[VAL_128]], [[VAL_129]] : f32
// CHECK: br ^bb40([[VAL_132]] : f32)
// CHECK: ^bb40([[VAL_133:%.*]]: f32):
diff --git a/mlir/test/Dialect/GPU/all-reduce.mlir b/mlir/test/Dialect/GPU/all-reduce.mlir
index 758fb44cbc88..4cc4869dfee5 100644
--- a/mlir/test/Dialect/GPU/all-reduce.mlir
+++ b/mlir/test/Dialect/GPU/all-reduce.mlir
@@ -35,10 +35,10 @@ gpu.module @kernels {
// CHECK: [[VAL_27:%.*]] = addi [[VAL_25]], [[VAL_18]] : i32
// CHECK: [[VAL_28:%.*]] = muli [[VAL_26]], [[VAL_16]] : i32
// CHECK: [[VAL_29:%.*]] = and [[VAL_27]], [[VAL_2]] : i32
- // CHECK: [[VAL_30:%.*]] = cmpi "eq", [[VAL_29]], [[VAL_3]] : i32
+ // CHECK: [[VAL_30:%.*]] = cmpi eq, [[VAL_29]], [[VAL_3]] : i32
// CHECK: [[VAL_31:%.*]] = subi [[VAL_27]], [[VAL_29]] : i32
// CHECK: [[VAL_32:%.*]] = subi [[VAL_28]], [[VAL_31]] : i32
- // CHECK: [[VAL_33:%.*]] = cmpi "slt", [[VAL_32]], [[VAL_5]] : i32
+ // CHECK: [[VAL_33:%.*]] = cmpi slt, [[VAL_32]], [[VAL_5]] : i32
// CHECK: cond_br [[VAL_33]], ^bb1, ^bb17
// CHECK: ^bb1:
// CHECK: [[VAL_34:%.*]], [[VAL_35:%.*]] = gpu.shuffle [[VAL_0]], [[VAL_6]], [[VAL_32]] xor : f32
@@ -107,12 +107,12 @@ gpu.module @kernels {
// CHECK: gpu.barrier
// CHECK: [[VAL_72:%.*]] = addi [[VAL_28]], [[VAL_2]] : i32
// CHECK: [[VAL_73:%.*]] = divi_signed [[VAL_72]], [[VAL_5]] : i32
- // CHECK: [[VAL_74:%.*]] = cmpi "slt", [[VAL_27]], [[VAL_73]] : i32
+ // CHECK: [[VAL_74:%.*]] = cmpi slt, [[VAL_27]], [[VAL_73]] : i32
// CHECK: cond_br [[VAL_74]], ^bb22, ^bb41
// CHECK: ^bb22:
// CHECK: [[VAL_75:%.*]] = index_cast [[VAL_27]] : i32 to index
// CHECK: [[VAL_76:%.*]] = load [[VAL_1]]{{\[}}[[VAL_75]]] : memref<32xf32, 3>
- // CHECK: [[VAL_77:%.*]] = cmpi "slt", [[VAL_73]], [[VAL_5]] : i32
+ // CHECK: [[VAL_77:%.*]] = cmpi slt, [[VAL_73]], [[VAL_5]] : i32
// CHECK: cond_br [[VAL_77]], ^bb23, ^bb39
// CHECK: ^bb23:
// CHECK: [[VAL_78:%.*]], [[VAL_79:%.*]] = gpu.shuffle [[VAL_76]], [[VAL_6]], [[VAL_73]] xor : f32
diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
index 8dca137843bb..e6bcf47e7e29 100644
--- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
+++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
@@ -80,8 +80,8 @@ func @cmpf(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<i1> {
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]]
// CHECK-SAME: outs(%[[INIT]]
// CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: i1):
- // CHECK: cmpf "olt", %{{.*}}, %{{.*}} : f32
- %0 = cmpf "olt", %arg0, %arg1 : tensor<f32>
+ // CHECK: cmpf olt, %{{.*}}, %{{.*}} : f32
+ %0 = cmpf olt, %arg0, %arg1 : tensor<f32>
return %0 : tensor<i1>
}
@@ -103,8 +103,8 @@ func @cmpf(%arg0: tensor<4x?x?x8x2x?xf32>, %arg1: tensor<4x?x?x8x2x?xf32>) -> te
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]]
// CHECK-SAME: outs(%[[INIT]]
// CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: i1):
- // CHECK: cmpf "olt", %{{.*}}, %{{.*}} : f32
- %0 = cmpf "olt", %arg0, %arg1 : tensor<4x?x?x8x2x?xf32>
+ // CHECK: cmpf olt, %{{.*}}, %{{.*}} : f32
+ %0 = cmpf olt, %arg0, %arg1 : tensor<4x?x?x8x2x?xf32>
return %0 : tensor<4x?x?x8x2x?xi1>
}
diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 7801bf1dbb56..71fd7c7cf15a 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -489,7 +489,7 @@ func @pooling_max_padding(%arg0: memref<?x?xf32>,
// CHECKLOOP: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKLOOP: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
// CHECKLOOP: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-// CHECKLOOP: %[[CMP:.*]] = cmpf "ogt", %[[RHS]], %[[SEL]] : f32
+// CHECKLOOP: %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32
// CHECKLOOP: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
// CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
@@ -509,7 +509,7 @@ func @pooling_max_padding(%arg0: memref<?x?xf32>,
// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKPARALLEL: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-// CHECKPARALLEL: %[[CMP:.*]] = cmpf "ogt", %[[RHS]], %[[SEL]] : f32
+// CHECKPARALLEL: %[[CMP:.*]] = cmpf ogt, %[[RHS]], %[[SEL]] : f32
// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
// CHECKPARALLEL: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
@@ -537,7 +537,7 @@ func @pooling_max_padding_i32(%arg0: memref<?x?xi32>,
// CHECKLOOP: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKLOOP: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
// CHECKLOOP: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-// CHECKLOOP: %[[CMP:.*]] = cmpi "sgt", %[[RHS]], %[[SEL]] : i32
+// CHECKLOOP: %[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32
// CHECKLOOP: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
// CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xi32>
@@ -557,7 +557,7 @@ func @pooling_max_padding_i32(%arg0: memref<?x?xi32>,
// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKPARALLEL: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-// CHECKPARALLEL: %[[CMP:.*]] = cmpi "sgt", %[[RHS]], %[[SEL]] : i32
+// CHECKPARALLEL: %[[CMP:.*]] = cmpi sgt, %[[RHS]], %[[SEL]] : i32
// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
// CHECKPARALLEL: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xi32>
@@ -623,7 +623,7 @@ func @pooling_min_padding(%arg0: memref<?x?xf32>,
// CHECKLOOP: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKLOOP: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
// CHECKLOOP: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-// CHECKLOOP: %[[CMP:.*]] = cmpf "olt", %[[RHS]], %[[SEL]] : f32
+// CHECKLOOP: %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32
// CHECKLOOP: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
// CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
@@ -643,7 +643,7 @@ func @pooling_min_padding(%arg0: memref<?x?xf32>,
// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKPARALLEL: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xf32>
// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : f32
-// CHECKPARALLEL: %[[CMP:.*]] = cmpf "olt", %[[RHS]], %[[SEL]] : f32
+// CHECKPARALLEL: %[[CMP:.*]] = cmpf olt, %[[RHS]], %[[SEL]] : f32
// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : f32
// CHECKPARALLEL: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xf32>
@@ -671,7 +671,7 @@ func @pooling_min_padding_i32(%arg0: memref<?x?xi32>,
// CHECKLOOP: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKLOOP: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
// CHECKLOOP: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-// CHECKLOOP: %[[CMP:.*]] = cmpi "slt", %[[RHS]], %[[SEL]] : i32
+// CHECKLOOP: %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32
// CHECKLOOP: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
// CHECKLOOP: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xi32>
@@ -691,7 +691,7 @@ func @pooling_min_padding_i32(%arg0: memref<?x?xi32>,
// CHECKPARALLEL: %[[IDY:.*]] = affine.max #[[$clampMinMap]](%[[IY]])
// CHECKPARALLEL: %[[LHS:.*]] = load %{{.*}}[%[[IDX]], %[[IDY]]] : memref<?x?xi32>
// CHECKPARALLEL: %[[SEL:.*]] = select %{{.*}}, %[[PAD]], %[[LHS]] : i32
-// CHECKPARALLEL: %[[CMP:.*]] = cmpi "slt", %[[RHS]], %[[SEL]] : i32
+// CHECKPARALLEL: %[[CMP:.*]] = cmpi slt, %[[RHS]], %[[SEL]] : i32
// CHECKPARALLEL: %[[RES:.*]] = select %{{.*}}, %[[RHS]], %[[SEL]] : i32
// CHECKPARALLEL: store %[[RES]], %{{.*}}[%{{.*}}, %{{.*}}] : memref<?x?xi32>
@@ -1074,7 +1074,7 @@ func @indexed_generic_op_1D_reduce(%arg0: memref<?xf32>,
outs(%arg2 : memref<f32>) {
^bb(%i : index, %a: f32, %b: f32, %c: f32) :
%0 = constant 0 : index
- %1 = cmpi "eq", %0, %i : index
+ %1 = cmpi eq, %0, %i : index
%2 = select %1, %b, %c : f32
%3 = addf %a, %2 : f32
linalg.yield %3 : f32
diff --git a/mlir/test/Dialect/Linalg/sparse_1d.mlir b/mlir/test/Dialect/Linalg/sparse_1d.mlir
index 4baf1d1c1403..0d471244f77a 100644
--- a/mlir/test/Dialect/Linalg/sparse_1d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_1d.mlir
@@ -96,12 +96,12 @@ func @mul_d(%arga: tensor<32xf32>, %argb: f32) -> tensor<32xf32> {
// CHECK: %[[VAL_11:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]]:2 = scf.while (%[[VAL_14:.*]] = %[[VAL_11]], %[[VAL_15:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_16:.*]] = cmpi "ult", %[[VAL_14]], %[[VAL_12]] : index
+// CHECK: %[[VAL_16:.*]] = cmpi ult, %[[VAL_14]], %[[VAL_12]] : index
// CHECK: scf.condition(%[[VAL_16]]) %[[VAL_14]], %[[VAL_15]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_17:.*]]: index, %[[VAL_18:.*]]: index):
// CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
-// CHECK: %[[VAL_20:.*]] = cmpi "eq", %[[VAL_19]], %[[VAL_18]] : index
+// CHECK: %[[VAL_20:.*]] = cmpi eq, %[[VAL_19]], %[[VAL_18]] : index
// CHECK: scf.if %[[VAL_20]] {
// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<?xf32>
// CHECK: %[[VAL_22:.*]] = addf %[[VAL_21]], %[[VAL_1]] : f32
@@ -112,7 +112,7 @@ func @mul_d(%arga: tensor<32xf32>, %argb: f32) -> tensor<32xf32> {
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_23:.*]] = cmpi "eq", %[[VAL_19]], %[[VAL_18]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi eq, %[[VAL_19]], %[[VAL_18]] : index
// CHECK: %[[VAL_24:.*]] = addi %[[VAL_17]], %[[VAL_6]] : index
// CHECK: %[[VAL_25:.*]] = select %[[VAL_23]], %[[VAL_24]], %[[VAL_17]] : index
// CHECK: %[[VAL_26:.*]] = addi %[[VAL_18]], %[[VAL_6]] : index
@@ -309,12 +309,12 @@ func @mul_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_12:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_17:.*]] = cmpi "ult", %[[VAL_15]], %[[VAL_13]] : index
+// CHECK: %[[VAL_17:.*]] = cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK: scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]] = cmpi "eq", %[[VAL_20]], %[[VAL_19]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: scf.if %[[VAL_21]] {
// CHECK: %[[VAL_22:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_19]]] : memref<32xf32>
// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xf32>
@@ -327,7 +327,7 @@ func @mul_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_26:.*]] = cmpi "eq", %[[VAL_20]], %[[VAL_19]] : index
+// CHECK: %[[VAL_26:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
@@ -416,12 +416,12 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_17:.*]] = cmpi "ult", %[[VAL_15]], %[[VAL_13]] : index
+// CHECK: %[[VAL_17:.*]] = cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK: scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
-// CHECK: %[[VAL_21:.*]] = cmpi "eq", %[[VAL_20]], %[[VAL_19]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: scf.if %[[VAL_21]] {
// CHECK: %[[VAL_22:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf32>
// CHECK: %[[VAL_23:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf32>
@@ -434,7 +434,7 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_26:.*]] = cmpi "eq", %[[VAL_20]], %[[VAL_19]] : index
+// CHECK: %[[VAL_26:.*]] = cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = addi %[[VAL_19]], %[[VAL_6]] : index
@@ -525,18 +525,18 @@ func @mul_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]]:2 = scf.while (%[[VAL_17:.*]] = %[[VAL_12]], %[[VAL_18:.*]] = %[[VAL_14]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_19:.*]] = cmpi "ult", %[[VAL_17]], %[[VAL_13]] : index
-// CHECK: %[[VAL_20:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_15]] : index
+// CHECK: %[[VAL_19:.*]] = cmpi ult, %[[VAL_17]], %[[VAL_13]] : index
+// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_15]] : index
// CHECK: %[[VAL_21:.*]] = and %[[VAL_19]], %[[VAL_20]] : i1
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_17]], %[[VAL_18]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
// CHECK: %[[VAL_24:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = cmpi "ult", %[[VAL_25]], %[[VAL_24]] : index
+// CHECK: %[[VAL_26:.*]] = cmpi ult, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: %[[VAL_27:.*]] = select %[[VAL_26]], %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_28:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_27]] : index
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_28:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_27]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_27]] : index
// CHECK: %[[VAL_30:.*]] = and %[[VAL_28]], %[[VAL_29]] : i1
// CHECK: scf.if %[[VAL_30]] {
// CHECK: %[[VAL_31:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_22]]] : memref<?xf32>
@@ -544,12 +544,12 @@ func @mul_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_33:.*]] = addf %[[VAL_31]], %[[VAL_32]] : f32
// CHECK: store %[[VAL_33]], %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<32xf32>
// CHECK: } else {
-// CHECK: %[[VAL_34:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_27]] : index
+// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_22]]] : memref<?xf32>
// CHECK: store %[[VAL_35]], %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<32xf32>
// CHECK: } else {
-// CHECK: %[[VAL_36:.*]] = cmpi "eq", %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_27]] : index
// CHECK: scf.if %[[VAL_36]] {
// CHECK: %[[VAL_37:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xf32>
// CHECK: store %[[VAL_37]], %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<32xf32>
@@ -557,10 +557,10 @@ func @mul_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_38:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_27]] : index
+// CHECK: %[[VAL_38:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_27]] : index
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_22]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = select %[[VAL_38]], %[[VAL_39]], %[[VAL_22]] : index
-// CHECK: %[[VAL_41:.*]] = cmpi "eq", %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_27]] : index
// CHECK: %[[VAL_42:.*]] = addi %[[VAL_23]], %[[VAL_4]] : index
// CHECK: %[[VAL_43:.*]] = select %[[VAL_41]], %[[VAL_42]], %[[VAL_23]] : index
// CHECK: scf.yield %[[VAL_40]], %[[VAL_43]] : index, index
@@ -607,18 +607,18 @@ func @add_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]]:2 = scf.while (%[[VAL_17:.*]] = %[[VAL_12]], %[[VAL_18:.*]] = %[[VAL_14]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_19:.*]] = cmpi "ult", %[[VAL_17]], %[[VAL_13]] : index
-// CHECK: %[[VAL_20:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_15]] : index
+// CHECK: %[[VAL_19:.*]] = cmpi ult, %[[VAL_17]], %[[VAL_13]] : index
+// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_15]] : index
// CHECK: %[[VAL_21:.*]] = and %[[VAL_19]], %[[VAL_20]] : i1
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_17]], %[[VAL_18]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
// CHECK: %[[VAL_24:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
-// CHECK: %[[VAL_26:.*]] = cmpi "ult", %[[VAL_25]], %[[VAL_24]] : index
+// CHECK: %[[VAL_26:.*]] = cmpi ult, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: %[[VAL_27:.*]] = select %[[VAL_26]], %[[VAL_25]], %[[VAL_24]] : index
-// CHECK: %[[VAL_28:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_27]] : index
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_28:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_27]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_27]] : index
// CHECK: %[[VAL_30:.*]] = and %[[VAL_28]], %[[VAL_29]] : i1
// CHECK: scf.if %[[VAL_30]] {
// CHECK: %[[VAL_31:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_22]]] : memref<?xf32>
@@ -627,10 +627,10 @@ func @add_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: store %[[VAL_33]], %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<32xf32>
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_34:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_27]] : index
+// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_27]] : index
// CHECK: %[[VAL_35:.*]] = addi %[[VAL_22]], %[[VAL_4]] : index
// CHECK: %[[VAL_36:.*]] = select %[[VAL_34]], %[[VAL_35]], %[[VAL_22]] : index
-// CHECK: %[[VAL_37:.*]] = cmpi "eq", %[[VAL_25]], %[[VAL_27]] : index
+// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_25]], %[[VAL_27]] : index
// CHECK: %[[VAL_38:.*]] = addi %[[VAL_23]], %[[VAL_4]] : index
// CHECK: %[[VAL_39:.*]] = select %[[VAL_37]], %[[VAL_38]], %[[VAL_23]] : index
// CHECK: scf.yield %[[VAL_36]], %[[VAL_39]] : index, index
@@ -683,16 +683,16 @@ func @mul_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:3 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_14]] : index
-// CHECK: %[[VAL_22:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_16]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
+// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: %[[VAL_23:.*]] = and %[[VAL_21]], %[[VAL_22]] : i1
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_18]], %[[VAL_19]], %[[VAL_20]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
// CHECK: %[[VAL_27:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_30:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
// CHECK: %[[VAL_32:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
@@ -702,13 +702,13 @@ func @mul_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: %[[VAL_36:.*]] = addf %[[VAL_33]], %[[VAL_35]] : f32
// CHECK: store %[[VAL_36]], %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_37:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_37]] {
// CHECK: %[[VAL_38:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_39:.*]] = mulf %[[VAL_38]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_39]], %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_40:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_40]] {
// CHECK: %[[VAL_41:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_42:.*]] = mulf %[[VAL_41]], %[[VAL_2]] : f32
@@ -717,10 +717,10 @@ func @mul_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>) -> tensor<32xf32> {
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_43:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_43:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_44:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
// CHECK: %[[VAL_45:.*]] = select %[[VAL_43]], %[[VAL_44]], %[[VAL_24]] : index
-// CHECK: %[[VAL_46:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_47:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_25]] : index
// CHECK: %[[VAL_49:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
@@ -773,16 +773,16 @@ func @two_way_inv(%arga: tensor<16xf32>,
// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:3 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_14]] : index
-// CHECK: %[[VAL_22:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_16]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
+// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: %[[VAL_23:.*]] = and %[[VAL_21]], %[[VAL_22]] : i1
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_18]], %[[VAL_19]], %[[VAL_20]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
// CHECK: %[[VAL_27:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_30:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
// CHECK: %[[VAL_32:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
@@ -791,13 +791,13 @@ func @two_way_inv(%arga: tensor<16xf32>,
// CHECK: %[[VAL_35:.*]] = mulf %[[VAL_34]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_35]], %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_36:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_36]] {
// CHECK: %[[VAL_37:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_38:.*]] = mulf %[[VAL_37]], %[[VAL_2]] : f32
// CHECK: store %[[VAL_38]], %[[VAL_12]]{{\[}}%[[VAL_26]]] : memref<16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_39:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_39:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_39]] {
// CHECK: %[[VAL_40:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_41:.*]] = mulf %[[VAL_40]], %[[VAL_2]] : f32
@@ -806,10 +806,10 @@ func @two_way_inv(%arga: tensor<16xf32>,
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_42:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_42:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_43:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
// CHECK: %[[VAL_44:.*]] = select %[[VAL_42]], %[[VAL_43]], %[[VAL_24]] : index
-// CHECK: %[[VAL_45:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_46:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_25]] : index
// CHECK: %[[VAL_48:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
@@ -923,16 +923,16 @@ func @sum_reduction(%arga: tensor<?xf32>, %argx: tensor<f32>) -> tensor<f32> {
// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:3 = scf.while (%[[VAL_18:.*]] = %[[VAL_13]], %[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_4]]) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_14]] : index
-// CHECK: %[[VAL_22:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_16]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_14]] : index
+// CHECK: %[[VAL_22:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_16]] : index
// CHECK: %[[VAL_23:.*]] = and %[[VAL_21]], %[[VAL_22]] : i1
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_18]], %[[VAL_19]], %[[VAL_20]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
// CHECK: %[[VAL_27:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_30:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_31:.*]] = and %[[VAL_29]], %[[VAL_30]] : i1
// CHECK: scf.if %[[VAL_31]] {
// CHECK: %[[VAL_32:.*]] = load %[[VAL_12]][] : memref<f32>
@@ -942,14 +942,14 @@ func @sum_reduction(%arga: tensor<?xf32>, %argx: tensor<f32>) -> tensor<f32> {
// CHECK: %[[VAL_36:.*]] = addf %[[VAL_32]], %[[VAL_35]] : f32
// CHECK: store %[[VAL_36]], %[[VAL_12]][] : memref<f32>
// CHECK: } else {
-// CHECK: %[[VAL_37:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_37:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_37]] {
// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]][] : memref<f32>
// CHECK: %[[VAL_39:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK: %[[VAL_40:.*]] = addf %[[VAL_38]], %[[VAL_39]] : f32
// CHECK: store %[[VAL_40]], %[[VAL_12]][] : memref<f32>
// CHECK: } else {
-// CHECK: %[[VAL_41:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_41]] {
// CHECK: %[[VAL_42:.*]] = load %[[VAL_12]][] : memref<f32>
// CHECK: %[[VAL_43:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
@@ -959,10 +959,10 @@ func @sum_reduction(%arga: tensor<?xf32>, %argx: tensor<f32>) -> tensor<f32> {
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_45:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_46:.*]] = addi %[[VAL_24]], %[[VAL_5]] : index
// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_24]] : index
-// CHECK: %[[VAL_48:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_26]] : index
+// CHECK: %[[VAL_48:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_26]] : index
// CHECK: %[[VAL_49:.*]] = addi %[[VAL_25]], %[[VAL_5]] : index
// CHECK: %[[VAL_50:.*]] = select %[[VAL_48]], %[[VAL_49]], %[[VAL_25]] : index
// CHECK: %[[VAL_51:.*]] = addi %[[VAL_26]], %[[VAL_5]] : index
@@ -1038,16 +1038,16 @@ func @sum_reduction_ss(%arga: tensor<16xf32>,
// CHECK: %[[VAL_18:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:3 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]], %[[VAL_23:.*]] = %[[VAL_5]]) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_17]] : index
-// CHECK: %[[VAL_25:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_19]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
+// CHECK: %[[VAL_25:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_26:.*]] = and %[[VAL_24]], %[[VAL_25]] : i1
// CHECK: scf.condition(%[[VAL_26]]) %[[VAL_21]], %[[VAL_22]], %[[VAL_23]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_27:.*]]: index, %[[VAL_28:.*]]: index, %[[VAL_29:.*]]: index):
// CHECK: %[[VAL_30:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_28]]] : memref<?xindex>
-// CHECK: %[[VAL_32:.*]] = cmpi "eq", %[[VAL_30]], %[[VAL_29]] : index
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_31]], %[[VAL_29]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_14]][] : memref<f32>
@@ -1058,7 +1058,7 @@ func @sum_reduction_ss(%arga: tensor<16xf32>,
// CHECK: %[[VAL_40:.*]] = addf %[[VAL_35]], %[[VAL_39]] : f32
// CHECK: store %[[VAL_40]], %[[VAL_14]][] : memref<f32>
// CHECK: } else {
-// CHECK: %[[VAL_41:.*]] = cmpi "eq", %[[VAL_30]], %[[VAL_29]] : index
+// CHECK: %[[VAL_41:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: scf.if %[[VAL_41]] {
// CHECK: %[[VAL_42:.*]] = load %[[VAL_14]][] : memref<f32>
// CHECK: %[[VAL_43:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_27]]] : memref<?xf32>
@@ -1066,7 +1066,7 @@ func @sum_reduction_ss(%arga: tensor<16xf32>,
// CHECK: %[[VAL_45:.*]] = addf %[[VAL_42]], %[[VAL_44]] : f32
// CHECK: store %[[VAL_45]], %[[VAL_14]][] : memref<f32>
// CHECK: } else {
-// CHECK: %[[VAL_46:.*]] = cmpi "eq", %[[VAL_31]], %[[VAL_29]] : index
+// CHECK: %[[VAL_46:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: scf.if %[[VAL_46]] {
// CHECK: %[[VAL_47:.*]] = load %[[VAL_14]][] : memref<f32>
// CHECK: %[[VAL_48:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_28]]] : memref<?xf32>
@@ -1076,10 +1076,10 @@ func @sum_reduction_ss(%arga: tensor<16xf32>,
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_50:.*]] = cmpi "eq", %[[VAL_30]], %[[VAL_29]] : index
+// CHECK: %[[VAL_50:.*]] = cmpi eq, %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_51:.*]] = addi %[[VAL_27]], %[[VAL_6]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_50]], %[[VAL_51]], %[[VAL_27]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "eq", %[[VAL_31]], %[[VAL_29]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_31]], %[[VAL_29]] : index
// CHECK: %[[VAL_54:.*]] = addi %[[VAL_28]], %[[VAL_6]] : index
// CHECK: %[[VAL_55:.*]] = select %[[VAL_53]], %[[VAL_54]], %[[VAL_28]] : index
// CHECK: %[[VAL_56:.*]] = addi %[[VAL_29]], %[[VAL_6]] : index
diff --git a/mlir/test/Dialect/Linalg/sparse_2d.mlir b/mlir/test/Dialect/Linalg/sparse_2d.mlir
index 9bb68ca91089..57805777eb46 100644
--- a/mlir/test/Dialect/Linalg/sparse_2d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_2d.mlir
@@ -114,12 +114,12 @@ func @mul_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_15:.*]] = addi %[[VAL_13]], %[[VAL_7]] : index
// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_14]], %[[VAL_19:.*]] = %[[VAL_5]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_20:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_16]] : index
+// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_16]] : index
// CHECK: scf.condition(%[[VAL_20]]) %[[VAL_18]], %[[VAL_19]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_21:.*]]: index, %[[VAL_22:.*]]: index):
// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK: %[[VAL_24:.*]] = cmpi "eq", %[[VAL_23]], %[[VAL_22]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: scf.if %[[VAL_24]] {
// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xf32>
// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_13]], %[[VAL_22]]] : memref<32x16xf32>
@@ -132,7 +132,7 @@ func @mul_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_29:.*]] = cmpi "eq", %[[VAL_23]], %[[VAL_22]] : index
+// CHECK: %[[VAL_29:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: %[[VAL_30:.*]] = addi %[[VAL_21]], %[[VAL_7]] : index
// CHECK: %[[VAL_31:.*]] = select %[[VAL_29]], %[[VAL_30]], %[[VAL_21]] : index
// CHECK: %[[VAL_32:.*]] = addi %[[VAL_22]], %[[VAL_7]] : index
@@ -227,12 +227,12 @@ func @mul_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_13:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_18:.*]] = cmpi "ult", %[[VAL_16]], %[[VAL_14]] : index
+// CHECK: %[[VAL_18:.*]] = cmpi ult, %[[VAL_16]], %[[VAL_14]] : index
// CHECK: scf.condition(%[[VAL_18]]) %[[VAL_16]], %[[VAL_17]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_19:.*]]: index, %[[VAL_20:.*]]: index):
// CHECK: %[[VAL_21:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
-// CHECK: %[[VAL_22:.*]] = cmpi "eq", %[[VAL_21]], %[[VAL_20]] : index
+// CHECK: %[[VAL_22:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: scf.if %[[VAL_22]] {
// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
// CHECK: %[[VAL_24:.*]] = muli %[[VAL_19]], %[[VAL_4]] : index
@@ -251,7 +251,7 @@ func @mul_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_31:.*]] = cmpi "eq", %[[VAL_21]], %[[VAL_20]] : index
+// CHECK: %[[VAL_31:.*]] = cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: %[[VAL_32:.*]] = addi %[[VAL_19]], %[[VAL_7]] : index
// CHECK: %[[VAL_33:.*]] = select %[[VAL_31]], %[[VAL_32]], %[[VAL_19]] : index
// CHECK: %[[VAL_34:.*]] = addi %[[VAL_20]], %[[VAL_7]] : index
@@ -350,23 +350,23 @@ func @mul_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_15:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_15]], %[[VAL_19:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_20:.*]] = cmpi "ult", %[[VAL_18]], %[[VAL_16]] : index
+// CHECK: %[[VAL_20:.*]] = cmpi ult, %[[VAL_18]], %[[VAL_16]] : index
// CHECK: scf.condition(%[[VAL_20]]) %[[VAL_18]], %[[VAL_19]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_21:.*]]: index, %[[VAL_22:.*]]: index):
// CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
-// CHECK: %[[VAL_24:.*]] = cmpi "eq", %[[VAL_23]], %[[VAL_22]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: scf.if %[[VAL_24]] {
// CHECK: %[[VAL_25:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_21]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = addi %[[VAL_21]], %[[VAL_7]] : index
// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]]:2 = scf.while (%[[VAL_29:.*]] = %[[VAL_25]], %[[VAL_30:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_31:.*]] = cmpi "ult", %[[VAL_29]], %[[VAL_27]] : index
+// CHECK: %[[VAL_31:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_27]] : index
// CHECK: scf.condition(%[[VAL_31]]) %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_32:.*]]: index, %[[VAL_33:.*]]: index):
// CHECK: %[[VAL_34:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_32]]] : memref<?xindex>
-// CHECK: %[[VAL_35:.*]] = cmpi "eq", %[[VAL_34]], %[[VAL_33]] : index
+// CHECK: %[[VAL_35:.*]] = cmpi eq, %[[VAL_34]], %[[VAL_33]] : index
// CHECK: scf.if %[[VAL_35]] {
// CHECK: %[[VAL_36:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_32]]] : memref<?xf32>
// CHECK: %[[VAL_37:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_22]], %[[VAL_33]]] : memref<32x16xf32>
@@ -379,7 +379,7 @@ func @mul_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_40:.*]] = cmpi "eq", %[[VAL_34]], %[[VAL_33]] : index
+// CHECK: %[[VAL_40:.*]] = cmpi eq, %[[VAL_34]], %[[VAL_33]] : index
// CHECK: %[[VAL_41:.*]] = addi %[[VAL_32]], %[[VAL_7]] : index
// CHECK: %[[VAL_42:.*]] = select %[[VAL_40]], %[[VAL_41]], %[[VAL_32]] : index
// CHECK: %[[VAL_43:.*]] = addi %[[VAL_33]], %[[VAL_7]] : index
@@ -398,7 +398,7 @@ func @mul_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_49:.*]] = cmpi "eq", %[[VAL_23]], %[[VAL_22]] : index
+// CHECK: %[[VAL_49:.*]] = cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: %[[VAL_50:.*]] = addi %[[VAL_21]], %[[VAL_7]] : index
// CHECK: %[[VAL_51:.*]] = select %[[VAL_49]], %[[VAL_50]], %[[VAL_21]] : index
// CHECK: %[[VAL_52:.*]] = addi %[[VAL_22]], %[[VAL_7]] : index
@@ -503,18 +503,18 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_17]] : index
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_25:.*]] = and %[[VAL_23]], %[[VAL_24]] : i1
// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
// CHECK: %[[VAL_28:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = cmpi "ult", %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_32:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
@@ -524,18 +524,18 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
// CHECK: %[[VAL_41:.*]]:2 = scf.while (%[[VAL_42:.*]] = %[[VAL_35]], %[[VAL_43:.*]] = %[[VAL_38]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_44:.*]] = cmpi "ult", %[[VAL_42]], %[[VAL_37]] : index
-// CHECK: %[[VAL_45:.*]] = cmpi "ult", %[[VAL_43]], %[[VAL_40]] : index
+// CHECK: %[[VAL_44:.*]] = cmpi ult, %[[VAL_42]], %[[VAL_37]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_40]] : index
// CHECK: %[[VAL_46:.*]] = and %[[VAL_44]], %[[VAL_45]] : i1
// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_42]], %[[VAL_43]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_47:.*]]: index, %[[VAL_48:.*]]: index):
// CHECK: %[[VAL_49:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_47]]] : memref<?xindex>
// CHECK: %[[VAL_50:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = cmpi "ult", %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: %[[VAL_51:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_49]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_51]], %[[VAL_50]], %[[VAL_49]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_55:.*]] = and %[[VAL_53]], %[[VAL_54]] : i1
// CHECK: scf.if %[[VAL_55]] {
// CHECK: %[[VAL_56:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
@@ -543,12 +543,12 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: %[[VAL_58:.*]] = addf %[[VAL_56]], %[[VAL_57]] : f32
// CHECK: store %[[VAL_58]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_59:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_59:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: scf.if %[[VAL_59]] {
// CHECK: %[[VAL_60:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
// CHECK: store %[[VAL_60]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_61:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_61:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: scf.if %[[VAL_61]] {
// CHECK: %[[VAL_62:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_48]]] : memref<?xf32>
// CHECK: store %[[VAL_62]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
@@ -556,10 +556,10 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_63:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: %[[VAL_64:.*]] = addi %[[VAL_47]], %[[VAL_4]] : index
// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_47]] : index
-// CHECK: %[[VAL_66:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_66:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_67:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_68:.*]] = select %[[VAL_66]], %[[VAL_67]], %[[VAL_48]] : index
// CHECK: scf.yield %[[VAL_65]], %[[VAL_68]] : index, index
@@ -575,7 +575,7 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: store %[[VAL_76]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_75]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: %[[VAL_77:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_77:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: scf.if %[[VAL_77]] {
// CHECK: %[[VAL_78:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_79:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
@@ -586,7 +586,7 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: store %[[VAL_83]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_82]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: %[[VAL_84:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_84:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: scf.if %[[VAL_84]] {
// CHECK: %[[VAL_85:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_86:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
@@ -600,10 +600,10 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32x16
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_91:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_91:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_92:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
// CHECK: %[[VAL_93:.*]] = select %[[VAL_91]], %[[VAL_92]], %[[VAL_26]] : index
-// CHECK: %[[VAL_94:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_94:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_95:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_96:.*]] = select %[[VAL_94]], %[[VAL_95]], %[[VAL_27]] : index
// CHECK: scf.yield %[[VAL_93]], %[[VAL_96]] : index, index
@@ -666,18 +666,18 @@ func @add_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_17]] : index
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_25:.*]] = and %[[VAL_23]], %[[VAL_24]] : i1
// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
// CHECK: %[[VAL_28:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = cmpi "ult", %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_32:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
@@ -687,18 +687,18 @@ func @add_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
// CHECK: %[[VAL_41:.*]]:2 = scf.while (%[[VAL_42:.*]] = %[[VAL_35]], %[[VAL_43:.*]] = %[[VAL_38]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_44:.*]] = cmpi "ult", %[[VAL_42]], %[[VAL_37]] : index
-// CHECK: %[[VAL_45:.*]] = cmpi "ult", %[[VAL_43]], %[[VAL_40]] : index
+// CHECK: %[[VAL_44:.*]] = cmpi ult, %[[VAL_42]], %[[VAL_37]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_40]] : index
// CHECK: %[[VAL_46:.*]] = and %[[VAL_44]], %[[VAL_45]] : i1
// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_42]], %[[VAL_43]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_47:.*]]: index, %[[VAL_48:.*]]: index):
// CHECK: %[[VAL_49:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_47]]] : memref<?xindex>
// CHECK: %[[VAL_50:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = cmpi "ult", %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: %[[VAL_51:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_49]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_51]], %[[VAL_50]], %[[VAL_49]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_55:.*]] = and %[[VAL_53]], %[[VAL_54]] : i1
// CHECK: scf.if %[[VAL_55]] {
// CHECK: %[[VAL_56:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
@@ -707,20 +707,20 @@ func @add_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: store %[[VAL_58]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_59:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_59:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: %[[VAL_60:.*]] = addi %[[VAL_47]], %[[VAL_4]] : index
// CHECK: %[[VAL_61:.*]] = select %[[VAL_59]], %[[VAL_60]], %[[VAL_47]] : index
-// CHECK: %[[VAL_62:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_62:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_63:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_64:.*]] = select %[[VAL_62]], %[[VAL_63]], %[[VAL_48]] : index
// CHECK: scf.yield %[[VAL_61]], %[[VAL_64]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_65:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_65:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_66:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
// CHECK: %[[VAL_67:.*]] = select %[[VAL_65]], %[[VAL_66]], %[[VAL_26]] : index
-// CHECK: %[[VAL_68:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_68:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_69:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_70:.*]] = select %[[VAL_68]], %[[VAL_69]], %[[VAL_27]] : index
// CHECK: scf.yield %[[VAL_67]], %[[VAL_70]] : index, index
@@ -776,18 +776,18 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_17]] : index
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_25:.*]] = and %[[VAL_23]], %[[VAL_24]] : i1
// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
// CHECK: %[[VAL_28:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = cmpi "ult", %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_32:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
@@ -797,18 +797,18 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
// CHECK: %[[VAL_41:.*]]:2 = scf.while (%[[VAL_42:.*]] = %[[VAL_35]], %[[VAL_43:.*]] = %[[VAL_38]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_44:.*]] = cmpi "ult", %[[VAL_42]], %[[VAL_37]] : index
-// CHECK: %[[VAL_45:.*]] = cmpi "ult", %[[VAL_43]], %[[VAL_40]] : index
+// CHECK: %[[VAL_44:.*]] = cmpi ult, %[[VAL_42]], %[[VAL_37]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_40]] : index
// CHECK: %[[VAL_46:.*]] = and %[[VAL_44]], %[[VAL_45]] : i1
// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_42]], %[[VAL_43]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_47:.*]]: index, %[[VAL_48:.*]]: index):
// CHECK: %[[VAL_49:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_47]]] : memref<?xindex>
// CHECK: %[[VAL_50:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = cmpi "ult", %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: %[[VAL_51:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_49]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_51]], %[[VAL_50]], %[[VAL_49]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_55:.*]] = and %[[VAL_53]], %[[VAL_54]] : i1
// CHECK: scf.if %[[VAL_55]] {
// CHECK: %[[VAL_56:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
@@ -816,12 +816,12 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_58:.*]] = addf %[[VAL_56]], %[[VAL_57]] : f32
// CHECK: store %[[VAL_58]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_59:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_59:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: scf.if %[[VAL_59]] {
// CHECK: %[[VAL_60:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
// CHECK: store %[[VAL_60]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
-// CHECK: %[[VAL_61:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_61:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: scf.if %[[VAL_61]] {
// CHECK: %[[VAL_62:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_48]]] : memref<?xf32>
// CHECK: store %[[VAL_62]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
@@ -829,10 +829,10 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_63:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: %[[VAL_64:.*]] = addi %[[VAL_47]], %[[VAL_4]] : index
// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_47]] : index
-// CHECK: %[[VAL_66:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_66:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_67:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_68:.*]] = select %[[VAL_66]], %[[VAL_67]], %[[VAL_48]] : index
// CHECK: scf.yield %[[VAL_65]], %[[VAL_68]] : index, index
@@ -848,7 +848,7 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: store %[[VAL_76]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_75]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: %[[VAL_77:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_77:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: scf.if %[[VAL_77]] {
// CHECK: %[[VAL_78:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_79:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
@@ -859,7 +859,7 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: store %[[VAL_83]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_82]]] : memref<32x16xf32>
// CHECK: }
// CHECK: } else {
-// CHECK: %[[VAL_84:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_84:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: scf.if %[[VAL_84]] {
// CHECK: %[[VAL_85:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_86:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
@@ -873,10 +873,10 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_91:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_91:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_92:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
// CHECK: %[[VAL_93:.*]] = select %[[VAL_91]], %[[VAL_92]], %[[VAL_26]] : index
-// CHECK: %[[VAL_94:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_94:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_95:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_96:.*]] = select %[[VAL_94]], %[[VAL_95]], %[[VAL_27]] : index
// CHECK: scf.yield %[[VAL_93]], %[[VAL_96]] : index, index
@@ -939,18 +939,18 @@ func @add_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_18:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_17]] : index
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_17]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_19]] : index
// CHECK: %[[VAL_25:.*]] = and %[[VAL_23]], %[[VAL_24]] : i1
// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index):
// CHECK: %[[VAL_28:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
-// CHECK: %[[VAL_30:.*]] = cmpi "ult", %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_30:.*]] = cmpi ult, %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_32:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_34:.*]] = and %[[VAL_32]], %[[VAL_33]] : i1
// CHECK: scf.if %[[VAL_34]] {
// CHECK: %[[VAL_35:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_26]]] : memref<?xindex>
@@ -960,18 +960,18 @@ func @add_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_40:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_39]]] : memref<?xindex>
// CHECK: %[[VAL_41:.*]]:2 = scf.while (%[[VAL_42:.*]] = %[[VAL_35]], %[[VAL_43:.*]] = %[[VAL_38]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_44:.*]] = cmpi "ult", %[[VAL_42]], %[[VAL_37]] : index
-// CHECK: %[[VAL_45:.*]] = cmpi "ult", %[[VAL_43]], %[[VAL_40]] : index
+// CHECK: %[[VAL_44:.*]] = cmpi ult, %[[VAL_42]], %[[VAL_37]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_40]] : index
// CHECK: %[[VAL_46:.*]] = and %[[VAL_44]], %[[VAL_45]] : i1
// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_42]], %[[VAL_43]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_47:.*]]: index, %[[VAL_48:.*]]: index):
// CHECK: %[[VAL_49:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_47]]] : memref<?xindex>
// CHECK: %[[VAL_50:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_48]]] : memref<?xindex>
-// CHECK: %[[VAL_51:.*]] = cmpi "ult", %[[VAL_50]], %[[VAL_49]] : index
+// CHECK: %[[VAL_51:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_49]] : index
// CHECK: %[[VAL_52:.*]] = select %[[VAL_51]], %[[VAL_50]], %[[VAL_49]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_55:.*]] = and %[[VAL_53]], %[[VAL_54]] : i1
// CHECK: scf.if %[[VAL_55]] {
// CHECK: %[[VAL_56:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_47]]] : memref<?xf32>
@@ -980,20 +980,20 @@ func @add_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>) -> tensor<32
// CHECK: store %[[VAL_58]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_52]]] : memref<32x16xf32>
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_59:.*]] = cmpi "eq", %[[VAL_49]], %[[VAL_52]] : index
+// CHECK: %[[VAL_59:.*]] = cmpi eq, %[[VAL_49]], %[[VAL_52]] : index
// CHECK: %[[VAL_60:.*]] = addi %[[VAL_47]], %[[VAL_4]] : index
// CHECK: %[[VAL_61:.*]] = select %[[VAL_59]], %[[VAL_60]], %[[VAL_47]] : index
-// CHECK: %[[VAL_62:.*]] = cmpi "eq", %[[VAL_50]], %[[VAL_52]] : index
+// CHECK: %[[VAL_62:.*]] = cmpi eq, %[[VAL_50]], %[[VAL_52]] : index
// CHECK: %[[VAL_63:.*]] = addi %[[VAL_48]], %[[VAL_4]] : index
// CHECK: %[[VAL_64:.*]] = select %[[VAL_62]], %[[VAL_63]], %[[VAL_48]] : index
// CHECK: scf.yield %[[VAL_61]], %[[VAL_64]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_65:.*]] = cmpi "eq", %[[VAL_28]], %[[VAL_31]] : index
+// CHECK: %[[VAL_65:.*]] = cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_66:.*]] = addi %[[VAL_26]], %[[VAL_4]] : index
// CHECK: %[[VAL_67:.*]] = select %[[VAL_65]], %[[VAL_66]], %[[VAL_26]] : index
-// CHECK: %[[VAL_68:.*]] = cmpi "eq", %[[VAL_29]], %[[VAL_31]] : index
+// CHECK: %[[VAL_68:.*]] = cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_69:.*]] = addi %[[VAL_27]], %[[VAL_4]] : index
// CHECK: %[[VAL_70:.*]] = select %[[VAL_68]], %[[VAL_69]], %[[VAL_27]] : index
// CHECK: scf.yield %[[VAL_67]], %[[VAL_70]] : index, index
@@ -1302,12 +1302,12 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]]:2 = scf.while (%[[VAL_30:.*]] = %[[VAL_27]], %[[VAL_31:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_32:.*]] = cmpi "ult", %[[VAL_30]], %[[VAL_28]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_28]] : index
// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_30]], %[[VAL_31]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index):
// CHECK: %[[VAL_35:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_33]]] : memref<?xindex>
-// CHECK: %[[VAL_36:.*]] = cmpi "eq", %[[VAL_35]], %[[VAL_34]] : index
+// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: scf.if %[[VAL_36]] {
// CHECK: %[[VAL_37:.*]] = load %[[VAL_22]]{{\[}}%[[VAL_34]]] : memref<?xf32>
// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_33]]] : memref<?xindex>
@@ -1320,10 +1320,10 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: %[[VAL_45:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
// CHECK: %[[VAL_46:.*]] = load %[[VAL_18]]{{\[}}%[[VAL_45]]] : memref<?xindex>
// CHECK: %[[VAL_47:.*]]:4 = scf.while (%[[VAL_48:.*]] = %[[VAL_38]], %[[VAL_49:.*]] = %[[VAL_41]], %[[VAL_50:.*]] = %[[VAL_44]], %[[VAL_51:.*]] = %[[VAL_7]]) : (index, index, index, index) -> (index, index, index, index) {
-// CHECK: %[[VAL_52:.*]] = cmpi "ult", %[[VAL_48]], %[[VAL_40]] : index
-// CHECK: %[[VAL_53:.*]] = cmpi "ult", %[[VAL_49]], %[[VAL_43]] : index
+// CHECK: %[[VAL_52:.*]] = cmpi ult, %[[VAL_48]], %[[VAL_40]] : index
+// CHECK: %[[VAL_53:.*]] = cmpi ult, %[[VAL_49]], %[[VAL_43]] : index
// CHECK: %[[VAL_54:.*]] = and %[[VAL_52]], %[[VAL_53]] : i1
-// CHECK: %[[VAL_55:.*]] = cmpi "ult", %[[VAL_50]], %[[VAL_46]] : index
+// CHECK: %[[VAL_55:.*]] = cmpi ult, %[[VAL_50]], %[[VAL_46]] : index
// CHECK: %[[VAL_56:.*]] = and %[[VAL_54]], %[[VAL_55]] : i1
// CHECK: scf.condition(%[[VAL_56]]) %[[VAL_48]], %[[VAL_49]], %[[VAL_50]], %[[VAL_51]] : index, index, index, index
// CHECK: } do {
@@ -1331,10 +1331,10 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: %[[VAL_61:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_57]]] : memref<?xindex>
// CHECK: %[[VAL_62:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_58]]] : memref<?xindex>
// CHECK: %[[VAL_63:.*]] = load %[[VAL_19]]{{\[}}%[[VAL_59]]] : memref<?xindex>
-// CHECK: %[[VAL_64:.*]] = cmpi "eq", %[[VAL_61]], %[[VAL_60]] : index
-// CHECK: %[[VAL_65:.*]] = cmpi "eq", %[[VAL_62]], %[[VAL_60]] : index
+// CHECK: %[[VAL_64:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
+// CHECK: %[[VAL_65:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
// CHECK: %[[VAL_66:.*]] = and %[[VAL_64]], %[[VAL_65]] : i1
-// CHECK: %[[VAL_67:.*]] = cmpi "eq", %[[VAL_63]], %[[VAL_60]] : index
+// CHECK: %[[VAL_67:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
// CHECK: %[[VAL_68:.*]] = and %[[VAL_66]], %[[VAL_67]] : i1
// CHECK: scf.if %[[VAL_68]] {
// CHECK: %[[VAL_69:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
@@ -1348,8 +1348,8 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: %[[VAL_77:.*]] = addf %[[VAL_69]], %[[VAL_76]] : f32
// CHECK: store %[[VAL_77]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
// CHECK: } else {
-// CHECK: %[[VAL_78:.*]] = cmpi "eq", %[[VAL_61]], %[[VAL_60]] : index
-// CHECK: %[[VAL_79:.*]] = cmpi "eq", %[[VAL_62]], %[[VAL_60]] : index
+// CHECK: %[[VAL_78:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
+// CHECK: %[[VAL_79:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
// CHECK: %[[VAL_80:.*]] = and %[[VAL_78]], %[[VAL_79]] : i1
// CHECK: scf.if %[[VAL_80]] {
// CHECK: %[[VAL_81:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
@@ -1361,7 +1361,7 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: %[[VAL_87:.*]] = addf %[[VAL_81]], %[[VAL_86]] : f32
// CHECK: store %[[VAL_87]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
// CHECK: } else {
-// CHECK: %[[VAL_88:.*]] = cmpi "eq", %[[VAL_63]], %[[VAL_60]] : index
+// CHECK: %[[VAL_88:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
// CHECK: scf.if %[[VAL_88]] {
// CHECK: %[[VAL_89:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
// CHECK: %[[VAL_90:.*]] = load %[[VAL_20]]{{\[}}%[[VAL_59]]] : memref<?xf32>
@@ -1371,29 +1371,29 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: }
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_92:.*]] = cmpi "eq", %[[VAL_61]], %[[VAL_60]] : index
+// CHECK: %[[VAL_92:.*]] = cmpi eq, %[[VAL_61]], %[[VAL_60]] : index
// CHECK: %[[VAL_93:.*]] = addi %[[VAL_57]], %[[VAL_9]] : index
// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_57]] : index
-// CHECK: %[[VAL_95:.*]] = cmpi "eq", %[[VAL_62]], %[[VAL_60]] : index
+// CHECK: %[[VAL_95:.*]] = cmpi eq, %[[VAL_62]], %[[VAL_60]] : index
// CHECK: %[[VAL_96:.*]] = addi %[[VAL_58]], %[[VAL_9]] : index
// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_58]] : index
-// CHECK: %[[VAL_98:.*]] = cmpi "eq", %[[VAL_63]], %[[VAL_60]] : index
+// CHECK: %[[VAL_98:.*]] = cmpi eq, %[[VAL_63]], %[[VAL_60]] : index
// CHECK: %[[VAL_99:.*]] = addi %[[VAL_59]], %[[VAL_9]] : index
// CHECK: %[[VAL_100:.*]] = select %[[VAL_98]], %[[VAL_99]], %[[VAL_59]] : index
// CHECK: %[[VAL_101:.*]] = addi %[[VAL_60]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]], %[[VAL_100]], %[[VAL_101]] : index, index, index, index
// CHECK: }
// CHECK: %[[VAL_102:.*]]:3 = scf.while (%[[VAL_103:.*]] = %[[VAL_104:.*]]#0, %[[VAL_105:.*]] = %[[VAL_104]]#1, %[[VAL_106:.*]] = %[[VAL_104]]#3) : (index, index, index) -> (index, index, index) {
-// CHECK: %[[VAL_107:.*]] = cmpi "ult", %[[VAL_103]], %[[VAL_40]] : index
-// CHECK: %[[VAL_108:.*]] = cmpi "ult", %[[VAL_105]], %[[VAL_43]] : index
+// CHECK: %[[VAL_107:.*]] = cmpi ult, %[[VAL_103]], %[[VAL_40]] : index
+// CHECK: %[[VAL_108:.*]] = cmpi ult, %[[VAL_105]], %[[VAL_43]] : index
// CHECK: %[[VAL_109:.*]] = and %[[VAL_107]], %[[VAL_108]] : i1
// CHECK: scf.condition(%[[VAL_109]]) %[[VAL_103]], %[[VAL_105]], %[[VAL_106]] : index, index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_110:.*]]: index, %[[VAL_111:.*]]: index, %[[VAL_112:.*]]: index):
// CHECK: %[[VAL_113:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_110]]] : memref<?xindex>
// CHECK: %[[VAL_114:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_111]]] : memref<?xindex>
-// CHECK: %[[VAL_115:.*]] = cmpi "eq", %[[VAL_113]], %[[VAL_112]] : index
-// CHECK: %[[VAL_116:.*]] = cmpi "eq", %[[VAL_114]], %[[VAL_112]] : index
+// CHECK: %[[VAL_115:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_112]] : index
+// CHECK: %[[VAL_116:.*]] = cmpi eq, %[[VAL_114]], %[[VAL_112]] : index
// CHECK: %[[VAL_117:.*]] = and %[[VAL_115]], %[[VAL_116]] : i1
// CHECK: scf.if %[[VAL_117]] {
// CHECK: %[[VAL_118:.*]] = load %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
@@ -1406,10 +1406,10 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: store %[[VAL_124]], %[[VAL_25]]{{\[}}%[[VAL_34]]] : memref<?xf32>
// CHECK: } else {
// CHECK: }
-// CHECK: %[[VAL_125:.*]] = cmpi "eq", %[[VAL_113]], %[[VAL_112]] : index
+// CHECK: %[[VAL_125:.*]] = cmpi eq, %[[VAL_113]], %[[VAL_112]] : index
// CHECK: %[[VAL_126:.*]] = addi %[[VAL_110]], %[[VAL_9]] : index
// CHECK: %[[VAL_127:.*]] = select %[[VAL_125]], %[[VAL_126]], %[[VAL_110]] : index
-// CHECK: %[[VAL_128:.*]] = cmpi "eq", %[[VAL_114]], %[[VAL_112]] : index
+// CHECK: %[[VAL_128:.*]] = cmpi eq, %[[VAL_114]], %[[VAL_112]] : index
// CHECK: %[[VAL_129:.*]] = addi %[[VAL_111]], %[[VAL_9]] : index
// CHECK: %[[VAL_130:.*]] = select %[[VAL_128]], %[[VAL_129]], %[[VAL_111]] : index
// CHECK: %[[VAL_131:.*]] = addi %[[VAL_112]], %[[VAL_9]] : index
@@ -1437,7 +1437,7 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_150:.*]] = cmpi "eq", %[[VAL_35]], %[[VAL_34]] : index
+// CHECK: %[[VAL_150:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: %[[VAL_151:.*]] = addi %[[VAL_33]], %[[VAL_9]] : index
// CHECK: %[[VAL_152:.*]] = select %[[VAL_150]], %[[VAL_151]], %[[VAL_33]] : index
// CHECK: %[[VAL_153:.*]] = addi %[[VAL_34]], %[[VAL_9]] : index
diff --git a/mlir/test/Dialect/Linalg/sparse_3d.mlir b/mlir/test/Dialect/Linalg/sparse_3d.mlir
index a32770e635e4..629a99d0a864 100644
--- a/mlir/test/Dialect/Linalg/sparse_3d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_3d.mlir
@@ -124,12 +124,12 @@ func @mul_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_19:.*]] = addi %[[VAL_17]], %[[VAL_8]] : index
// CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]]:2 = scf.while (%[[VAL_22:.*]] = %[[VAL_18]], %[[VAL_23:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_24:.*]] = cmpi "ult", %[[VAL_22]], %[[VAL_20]] : index
+// CHECK: %[[VAL_24:.*]] = cmpi ult, %[[VAL_22]], %[[VAL_20]] : index
// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_22]], %[[VAL_23]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index):
// CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
-// CHECK: %[[VAL_28:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_28:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: scf.if %[[VAL_28]] {
// CHECK: %[[VAL_29:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_25]]] : memref<?xf32>
// CHECK: %[[VAL_30:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_14]], %[[VAL_15]], %[[VAL_26]]] : memref<32x16x8xf32>
@@ -142,7 +142,7 @@ func @mul_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_33:.*]] = cmpi "eq", %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_33:.*]] = cmpi eq, %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_34:.*]] = addi %[[VAL_25]], %[[VAL_8]] : index
// CHECK: %[[VAL_35:.*]] = select %[[VAL_33]], %[[VAL_34]], %[[VAL_25]] : index
// CHECK: %[[VAL_36:.*]] = addi %[[VAL_26]], %[[VAL_8]] : index
@@ -246,12 +246,12 @@ func @mul_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_16:.*]] = addi %[[VAL_14]], %[[VAL_8]] : index
// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_15]], %[[VAL_20:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_17]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_17]] : index
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_19]], %[[VAL_20]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_25:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_25:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: scf.if %[[VAL_25]] {
// CHECK: scf.for %[[VAL_26:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
// CHECK: %[[VAL_27:.*]] = muli %[[VAL_22]], %[[VAL_5]] : index
@@ -270,7 +270,7 @@ func @mul_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_34:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_34:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_35:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: %[[VAL_36:.*]] = select %[[VAL_34]], %[[VAL_35]], %[[VAL_22]] : index
// CHECK: %[[VAL_37:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
@@ -377,23 +377,23 @@ func @mul_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_18:.*]] = addi %[[VAL_16]], %[[VAL_8]] : index
// CHECK: %[[VAL_19:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_17]], %[[VAL_22:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_19]] : index
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index):
// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
-// CHECK: %[[VAL_27:.*]] = cmpi "eq", %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_27:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: scf.if %[[VAL_27]] {
// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_29]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]]:2 = scf.while (%[[VAL_32:.*]] = %[[VAL_28]], %[[VAL_33:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_34:.*]] = cmpi "ult", %[[VAL_32]], %[[VAL_30]] : index
+// CHECK: %[[VAL_34:.*]] = cmpi ult, %[[VAL_32]], %[[VAL_30]] : index
// CHECK: scf.condition(%[[VAL_34]]) %[[VAL_32]], %[[VAL_33]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_35:.*]]: index, %[[VAL_36:.*]]: index):
// CHECK: %[[VAL_37:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_35]]] : memref<?xindex>
-// CHECK: %[[VAL_38:.*]] = cmpi "eq", %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_38:.*]] = cmpi eq, %[[VAL_37]], %[[VAL_36]] : index
// CHECK: scf.if %[[VAL_38]] {
// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_35]]] : memref<?xf32>
// CHECK: %[[VAL_40:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_16]], %[[VAL_25]], %[[VAL_36]]] : memref<32x16x8xf32>
@@ -406,7 +406,7 @@ func @mul_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_43:.*]] = cmpi "eq", %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_43:.*]] = cmpi eq, %[[VAL_37]], %[[VAL_36]] : index
// CHECK: %[[VAL_44:.*]] = addi %[[VAL_35]], %[[VAL_8]] : index
// CHECK: %[[VAL_45:.*]] = select %[[VAL_43]], %[[VAL_44]], %[[VAL_35]] : index
// CHECK: %[[VAL_46:.*]] = addi %[[VAL_36]], %[[VAL_8]] : index
@@ -425,7 +425,7 @@ func @mul_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_52:.*]] = cmpi "eq", %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_52:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_53:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: %[[VAL_54:.*]] = select %[[VAL_52]], %[[VAL_53]], %[[VAL_24]] : index
// CHECK: %[[VAL_55:.*]] = addi %[[VAL_25]], %[[VAL_8]] : index
@@ -531,12 +531,12 @@ func @mul_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_14:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]]:2 = scf.while (%[[VAL_17:.*]] = %[[VAL_14]], %[[VAL_18:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_19:.*]] = cmpi "ult", %[[VAL_17]], %[[VAL_15]] : index
+// CHECK: %[[VAL_19:.*]] = cmpi ult, %[[VAL_17]], %[[VAL_15]] : index
// CHECK: scf.condition(%[[VAL_19]]) %[[VAL_17]], %[[VAL_18]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_20:.*]]: index, %[[VAL_21:.*]]: index):
// CHECK: %[[VAL_22:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_20]]] : memref<?xindex>
-// CHECK: %[[VAL_23:.*]] = cmpi "eq", %[[VAL_22]], %[[VAL_21]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi eq, %[[VAL_22]], %[[VAL_21]] : index
// CHECK: scf.if %[[VAL_23]] {
// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
// CHECK: %[[VAL_25:.*]] = muli %[[VAL_20]], %[[VAL_4]] : index
@@ -561,7 +561,7 @@ func @mul_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_36:.*]] = cmpi "eq", %[[VAL_22]], %[[VAL_21]] : index
+// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_22]], %[[VAL_21]] : index
// CHECK: %[[VAL_37:.*]] = addi %[[VAL_20]], %[[VAL_8]] : index
// CHECK: %[[VAL_38:.*]] = select %[[VAL_36]], %[[VAL_37]], %[[VAL_20]] : index
// CHECK: %[[VAL_39:.*]] = addi %[[VAL_21]], %[[VAL_8]] : index
@@ -668,12 +668,12 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_16]], %[[VAL_20:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_17]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_17]] : index
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_19]], %[[VAL_20]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_25:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_25:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: scf.if %[[VAL_25]] {
// CHECK: scf.for %[[VAL_26:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
// CHECK: %[[VAL_27:.*]] = muli %[[VAL_22]], %[[VAL_4]] : index
@@ -682,12 +682,12 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_30:.*]] = addi %[[VAL_28]], %[[VAL_8]] : index
// CHECK: %[[VAL_31:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<?xindex>
// CHECK: %[[VAL_32:.*]]:2 = scf.while (%[[VAL_33:.*]] = %[[VAL_29]], %[[VAL_34:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_35:.*]] = cmpi "ult", %[[VAL_33]], %[[VAL_31]] : index
+// CHECK: %[[VAL_35:.*]] = cmpi ult, %[[VAL_33]], %[[VAL_31]] : index
// CHECK: scf.condition(%[[VAL_35]]) %[[VAL_33]], %[[VAL_34]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_36:.*]]: index, %[[VAL_37:.*]]: index):
// CHECK: %[[VAL_38:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_36]]] : memref<?xindex>
-// CHECK: %[[VAL_39:.*]] = cmpi "eq", %[[VAL_38]], %[[VAL_37]] : index
+// CHECK: %[[VAL_39:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
// CHECK: scf.if %[[VAL_39]] {
// CHECK: %[[VAL_40:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_36]]] : memref<?xf32>
// CHECK: %[[VAL_41:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_23]], %[[VAL_26]], %[[VAL_37]]] : memref<32x16x8xf32>
@@ -700,7 +700,7 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_44:.*]] = cmpi "eq", %[[VAL_38]], %[[VAL_37]] : index
+// CHECK: %[[VAL_44:.*]] = cmpi eq, %[[VAL_38]], %[[VAL_37]] : index
// CHECK: %[[VAL_45:.*]] = addi %[[VAL_36]], %[[VAL_8]] : index
// CHECK: %[[VAL_46:.*]] = select %[[VAL_44]], %[[VAL_45]], %[[VAL_36]] : index
// CHECK: %[[VAL_47:.*]] = addi %[[VAL_37]], %[[VAL_8]] : index
@@ -722,7 +722,7 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_55:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: %[[VAL_56:.*]] = select %[[VAL_54]], %[[VAL_55]], %[[VAL_22]] : index
// CHECK: %[[VAL_57:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
@@ -832,23 +832,23 @@ func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_16:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]]:2 = scf.while (%[[VAL_19:.*]] = %[[VAL_16]], %[[VAL_20:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_21:.*]] = cmpi "ult", %[[VAL_19]], %[[VAL_17]] : index
+// CHECK: %[[VAL_21:.*]] = cmpi ult, %[[VAL_19]], %[[VAL_17]] : index
// CHECK: scf.condition(%[[VAL_21]]) %[[VAL_19]], %[[VAL_20]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_22:.*]]: index, %[[VAL_23:.*]]: index):
// CHECK: %[[VAL_24:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_22]]] : memref<?xindex>
-// CHECK: %[[VAL_25:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_25:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: scf.if %[[VAL_25]] {
// CHECK: %[[VAL_26:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]]:2 = scf.while (%[[VAL_30:.*]] = %[[VAL_26]], %[[VAL_31:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_32:.*]] = cmpi "ult", %[[VAL_30]], %[[VAL_28]] : index
+// CHECK: %[[VAL_32:.*]] = cmpi ult, %[[VAL_30]], %[[VAL_28]] : index
// CHECK: scf.condition(%[[VAL_32]]) %[[VAL_30]], %[[VAL_31]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index):
// CHECK: %[[VAL_35:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_33]]] : memref<?xindex>
-// CHECK: %[[VAL_36:.*]] = cmpi "eq", %[[VAL_35]], %[[VAL_34]] : index
+// CHECK: %[[VAL_36:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: scf.if %[[VAL_36]] {
// CHECK: scf.for %[[VAL_37:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {
// CHECK: %[[VAL_38:.*]] = muli %[[VAL_33]], %[[VAL_5]] : index
@@ -867,7 +867,7 @@ func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_45:.*]] = cmpi "eq", %[[VAL_35]], %[[VAL_34]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: %[[VAL_46:.*]] = addi %[[VAL_33]], %[[VAL_8]] : index
// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_33]] : index
// CHECK: %[[VAL_48:.*]] = addi %[[VAL_34]], %[[VAL_8]] : index
@@ -890,7 +890,7 @@ func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_56:.*]] = cmpi "eq", %[[VAL_24]], %[[VAL_23]] : index
+// CHECK: %[[VAL_56:.*]] = cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_57:.*]] = addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: %[[VAL_58:.*]] = select %[[VAL_56]], %[[VAL_57]], %[[VAL_22]] : index
// CHECK: %[[VAL_59:.*]] = addi %[[VAL_23]], %[[VAL_8]] : index
@@ -1002,34 +1002,34 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_18]], %[[VAL_22:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_23:.*]] = cmpi "ult", %[[VAL_21]], %[[VAL_19]] : index
+// CHECK: %[[VAL_23:.*]] = cmpi ult, %[[VAL_21]], %[[VAL_19]] : index
// CHECK: scf.condition(%[[VAL_23]]) %[[VAL_21]], %[[VAL_22]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index):
// CHECK: %[[VAL_26:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xindex>
-// CHECK: %[[VAL_27:.*]] = cmpi "eq", %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_27:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: scf.if %[[VAL_27]] {
// CHECK: %[[VAL_28:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: %[[VAL_30:.*]] = load %[[VAL_11]]{{\[}}%[[VAL_29]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]]:2 = scf.while (%[[VAL_32:.*]] = %[[VAL_28]], %[[VAL_33:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_34:.*]] = cmpi "ult", %[[VAL_32]], %[[VAL_30]] : index
+// CHECK: %[[VAL_34:.*]] = cmpi ult, %[[VAL_32]], %[[VAL_30]] : index
// CHECK: scf.condition(%[[VAL_34]]) %[[VAL_32]], %[[VAL_33]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_35:.*]]: index, %[[VAL_36:.*]]: index):
// CHECK: %[[VAL_37:.*]] = load %[[VAL_12]]{{\[}}%[[VAL_35]]] : memref<?xindex>
-// CHECK: %[[VAL_38:.*]] = cmpi "eq", %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_38:.*]] = cmpi eq, %[[VAL_37]], %[[VAL_36]] : index
// CHECK: scf.if %[[VAL_38]] {
// CHECK: %[[VAL_39:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_35]]] : memref<?xindex>
// CHECK: %[[VAL_40:.*]] = addi %[[VAL_35]], %[[VAL_8]] : index
// CHECK: %[[VAL_41:.*]] = load %[[VAL_13]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_42:.*]]:2 = scf.while (%[[VAL_43:.*]] = %[[VAL_39]], %[[VAL_44:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
-// CHECK: %[[VAL_45:.*]] = cmpi "ult", %[[VAL_43]], %[[VAL_41]] : index
+// CHECK: %[[VAL_45:.*]] = cmpi ult, %[[VAL_43]], %[[VAL_41]] : index
// CHECK: scf.condition(%[[VAL_45]]) %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_46:.*]]: index, %[[VAL_47:.*]]: index):
// CHECK: %[[VAL_48:.*]] = load %[[VAL_14]]{{\[}}%[[VAL_46]]] : memref<?xindex>
-// CHECK: %[[VAL_49:.*]] = cmpi "eq", %[[VAL_48]], %[[VAL_47]] : index
+// CHECK: %[[VAL_49:.*]] = cmpi eq, %[[VAL_48]], %[[VAL_47]] : index
// CHECK: scf.if %[[VAL_49]] {
// CHECK: %[[VAL_50:.*]] = load %[[VAL_15]]{{\[}}%[[VAL_46]]] : memref<?xf32>
// CHECK: %[[VAL_51:.*]] = load %[[VAL_16]]{{\[}}%[[VAL_25]], %[[VAL_36]], %[[VAL_47]]] : memref<32x16x8xf32>
@@ -1042,7 +1042,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_54:.*]] = cmpi "eq", %[[VAL_48]], %[[VAL_47]] : index
+// CHECK: %[[VAL_54:.*]] = cmpi eq, %[[VAL_48]], %[[VAL_47]] : index
// CHECK: %[[VAL_55:.*]] = addi %[[VAL_46]], %[[VAL_8]] : index
// CHECK: %[[VAL_56:.*]] = select %[[VAL_54]], %[[VAL_55]], %[[VAL_46]] : index
// CHECK: %[[VAL_57:.*]] = addi %[[VAL_47]], %[[VAL_8]] : index
@@ -1061,7 +1061,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_63:.*]] = cmpi "eq", %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_63:.*]] = cmpi eq, %[[VAL_37]], %[[VAL_36]] : index
// CHECK: %[[VAL_64:.*]] = addi %[[VAL_35]], %[[VAL_8]] : index
// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_35]] : index
// CHECK: %[[VAL_66:.*]] = addi %[[VAL_36]], %[[VAL_8]] : index
@@ -1084,7 +1084,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>) -> tensor<
// CHECK: } else {
// CHECK: }
// CHECK: }
-// CHECK: %[[VAL_74:.*]] = cmpi "eq", %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_74:.*]] = cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_75:.*]] = addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: %[[VAL_76:.*]] = select %[[VAL_74]], %[[VAL_75]], %[[VAL_24]] : index
// CHECK: %[[VAL_77:.*]] = addi %[[VAL_25]], %[[VAL_8]] : index
diff --git a/mlir/test/Dialect/Linalg/tile-and-distribute.mlir b/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
index fcecf896ac5d..94c0e546db01 100644
--- a/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
+++ b/mlir/test/Dialect/Linalg/tile-and-distribute.mlir
@@ -42,8 +42,8 @@ func @gemm2(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
// CHECK-DAG: %[[BIDX:.*]] = "gpu.block_id"() {dimension = "x"}
// CHECK: %[[ITERY:.*]] = affine.apply #[[MAP0]]()[%[[BIDY]]]
// CHECK: %[[ITERX:.*]] = affine.apply #[[MAP0]]()[%[[BIDX]]]
-// CHECK: %[[INBOUNDSY:.*]] = cmpi "slt", %[[ITERY]], %{{.*}}
-// CHECK: %[[INBOUNDSX:.*]] = cmpi "slt", %[[ITERX]], %{{.*}}
+// CHECK: %[[INBOUNDSY:.*]] = cmpi slt, %[[ITERY]], %{{.*}}
+// CHECK: %[[INBOUNDSX:.*]] = cmpi slt, %[[ITERX]], %{{.*}}
// CHECK: %[[INBOUNDS:.*]] = and %[[INBOUNDSY]], %[[INBOUNDSX]]
// CHECK: scf.if %[[INBOUNDS]]
// CHECK: scf.for %[[ARG3:.*]] =
@@ -102,7 +102,7 @@ func @gemm4(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
// CHECK: %[[BIDY:.*]] = "gpu.block_id"() {dimension = "y"}
// CHECK: %[[BIDX:.*]] = "gpu.block_id"() {dimension = "x"}
// CHECK: %[[LBX:.*]] = affine.apply #[[MAP0]]()[%[[BIDX]]]
-// CHECK: %[[INBOUNDS:.*]] = cmpi "slt", %[[LBX]], %{{.*}}
+// CHECK: %[[INBOUNDS:.*]] = cmpi slt, %[[LBX]], %{{.*}}
// CHECK: scf.if %[[INBOUNDS]]
// CHECK: scf.for %[[ARG3:.*]] =
// CHECK: %[[OFFSETY:.*]] = affine.apply #[[MAP0]]()[%[[BIDY]]]
@@ -134,7 +134,7 @@ func @gemm5(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>)
// CHECK: %[[LBY:.*]] = affine.apply #[[MAP0]]()[%[[BIDY]]]
// CHECK: %[[LBX:.*]] = affine.apply #[[MAP0]]()[%[[BIDX]]]
// CHECK: %[[STEPX:.*]] = affine.apply #[[MAP0]]()[%[[NBLOCKSX]]]
-// CHECK: %[[INBOUNDS:.*]] = cmpi "slt", %[[LBY]], %{{.*}}
+// CHECK: %[[INBOUNDS:.*]] = cmpi slt, %[[LBY]], %{{.*}}
// CHECK: scf.if %[[INBOUNDS]]
// CHECK: scf.parallel (%[[ARG3.*]]) = (%[[LBX]]) to (%{{.*}}) step (%[[STEPX]])
// CHECK: scf.for %[[ARG4:.*]] =
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index cf49e85a4a28..2133aad70bd0 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -162,7 +162,7 @@ func @generic_vectorize(%arg0: memref<4x256xf32>, %arg1: memref<4x256xf32>,
%arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32,
%arg14 : f32):
%6 = addf %arg4, %arg6 : f32
- %7 = cmpf "ogt", %arg3, %arg6 : f32
+ %7 = cmpf ogt, %arg3, %arg6 : f32
%8 = constant 2.0 : f32
%9 = divf %arg5, %i : f32
%10 = exp2 %arg5 : f32
@@ -188,7 +188,7 @@ func @generic_vectorize(%arg0: memref<4x256xf32>, %arg1: memref<4x256xf32>,
// CHECK: %[[V0B:.*]] = vector.broadcast %[[V0]] : vector<256xf32> to vector<4x256xf32>
// CHECK: %[[ADD:.*]] = addf %[[V0B]], %[[V1]] : vector<4x256xf32>
// CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32>
-// CHECK: %[[CMP:.*]] = cmpf "ogt", %[[V2]], %[[V1]] : vector<4x256xf32>
+// CHECK: %[[CMP:.*]] = cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32>
// CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32>
// CHECK: %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32>
// CHECK: %[[DIV:.*]] = divf %[[V3]], %[[ARG3B]] : vector<4x256xf32>
@@ -241,7 +241,7 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
%arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32,
%arg14 : f32):
%6 = addf %arg4, %arg6 : f32
- %7 = cmpf "ogt", %arg3, %arg6 : f32
+ %7 = cmpf ogt, %arg3, %arg6 : f32
%8 = constant 2.0 : f32
%9 = divf %arg5, %i : f32
%10 = exp2 %arg5 : f32
@@ -272,7 +272,7 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
// CHECK: %[[V0B:.*]] = vector.broadcast %[[V0]] : vector<256xf32> to vector<4x256xf32>
// CHECK: %[[ADD:.*]] = addf %[[V0B]], %[[V1]] : vector<4x256xf32>
// CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32>
-// CHECK: %[[CMP:.*]] = cmpf "ogt", %[[V2]], %[[V1]] : vector<4x256xf32>
+// CHECK: %[[CMP:.*]] = cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32>
// CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32>
// CHECK: %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32>
// CHECK: %[[DIV:.*]] = divf %[[V3]], %[[ARG3B]] : vector<4x256xf32>
diff --git a/mlir/test/Dialect/SCF/for-loop-specialization.mlir b/mlir/test/Dialect/SCF/for-loop-specialization.mlir
index 1be3f48dc549..473b8232e351 100644
--- a/mlir/test/Dialect/SCF/for-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-specialization.mlir
@@ -25,7 +25,7 @@ func @for(%outer: index, %A: memref<?xf32>, %B: memref<?xf32>,
// CHECK: [[DIM_0:%.*]] = dim [[ARG1]], [[CST_0]] : memref<?xf32>
// CHECK: [[MIN:%.*]] = affine.min #map(){{\[}}[[DIM_0]], [[ARG0]]]
// CHECK: [[CST_1024:%.*]] = constant 1024 : index
-// CHECK: [[PRED:%.*]] = cmpi "eq", [[MIN]], [[CST_1024]] : index
+// CHECK: [[PRED:%.*]] = cmpi eq, [[MIN]], [[CST_1024]] : index
// CHECK: scf.if [[PRED]] {
// CHECK: scf.for [[IDX0:%.*]] = [[CST_0]] to [[CST_1024]] step [[CST_1]] {
// CHECK: store
diff --git a/mlir/test/Dialect/SCF/ops.mlir b/mlir/test/Dialect/SCF/ops.mlir
index 8e9f6a0ed33d..8192653787c2 100644
--- a/mlir/test/Dialect/SCF/ops.mlir
+++ b/mlir/test/Dialect/SCF/ops.mlir
@@ -7,9 +7,9 @@
func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
scf.for %i0 = %arg0 to %arg1 step %arg2 {
scf.for %i1 = %arg0 to %arg1 step %arg2 {
- %min_cmp = cmpi "slt", %i0, %i1 : index
+ %min_cmp = cmpi slt, %i0, %i1 : index
%min = select %min_cmp, %i0, %i1 : index
- %max_cmp = cmpi "sge", %i0, %i1 : index
+ %max_cmp = cmpi sge, %i0, %i1 : index
%max = select %max_cmp, %i0, %i1 : index
scf.for %i2 = %min to %max step %i1 {
}
@@ -20,9 +20,9 @@ func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-LABEL: func @std_for(
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
-// CHECK-NEXT: %{{.*}} = cmpi "slt", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = cmpi slt, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : index
-// CHECK-NEXT: %{{.*}} = cmpi "sge", %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = cmpi sge, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
@@ -55,9 +55,9 @@ func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
%step = constant 1 : index
scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
step (%arg4, %step) {
- %min_cmp = cmpi "slt", %i0, %i1 : index
+ %min_cmp = cmpi slt, %i0, %i1 : index
%min = select %min_cmp, %i0, %i1 : index
- %max_cmp = cmpi "sge", %i0, %i1 : index
+ %max_cmp = cmpi sge, %i0, %i1 : index
%max = select %max_cmp, %i0, %i1 : index
%zero = constant 0.0 : f32
%int_zero = constant 0 : i32
@@ -88,9 +88,9 @@ func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: %[[STEP:.*]] = constant 1 : index
// CHECK-NEXT: scf.parallel (%[[I0:.*]], %[[I1:.*]]) = (%[[ARG0]], %[[ARG1]]) to
// CHECK: (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[STEP]]) {
-// CHECK-NEXT: %[[MIN_CMP:.*]] = cmpi "slt", %[[I0]], %[[I1]] : index
+// CHECK-NEXT: %[[MIN_CMP:.*]] = cmpi slt, %[[I0]], %[[I1]] : index
// CHECK-NEXT: %[[MIN:.*]] = select %[[MIN_CMP]], %[[I0]], %[[I1]] : index
-// CHECK-NEXT: %[[MAX_CMP:.*]] = cmpi "sge", %[[I0]], %[[I1]] : index
+// CHECK-NEXT: %[[MAX_CMP:.*]] = cmpi sge, %[[I0]], %[[I1]] : index
// CHECK-NEXT: %[[MAX:.*]] = select %[[MAX_CMP]], %[[I0]], %[[I1]] : index
// CHECK-NEXT: %[[ZERO:.*]] = constant 0.000000e+00 : f32
// CHECK-NEXT: %[[INT_ZERO:.*]] = constant 0 : i32
@@ -209,7 +209,7 @@ func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index, %ub: index, %ste
%c0 = constant 0.0 : f32
%sum = scf.for %iv = %lb to %ub step %step iter_args(%sum_iter = %sum_0) -> (f32) {
%t = load %buffer[%iv] : memref<1024xf32>
- %cond = cmpf "ugt", %t, %c0 : f32
+ %cond = cmpf ugt, %t, %c0 : f32
%sum_next = scf.if %cond -> (f32) {
%new_sum = addf %sum_iter, %t : f32
scf.yield %new_sum : f32
@@ -230,7 +230,7 @@ func @conditional_reduce(%buffer: memref<1024xf32>, %lb: index, %ub: index, %ste
// CHECK-NEXT: %[[RESULT:.*]] = scf.for %[[IV:.*]] = %[[ARG1]] to %[[ARG2]] step %[[ARG3]]
// CHECK-SAME: iter_args(%[[ITER:.*]] = %[[INIT]]) -> (f32) {
// CHECK-NEXT: %[[T:.*]] = load %[[ARG0]][%[[IV]]]
-// CHECK-NEXT: %[[COND:.*]] = cmpf "ugt", %[[T]], %[[ZERO]]
+// CHECK-NEXT: %[[COND:.*]] = cmpf ugt, %[[T]], %[[ZERO]]
// CHECK-NEXT: %[[IFRES:.*]] = scf.if %[[COND]] -> (f32) {
// CHECK-NEXT: %[[THENRES:.*]] = addf %[[ITER]], %[[T]]
// CHECK-NEXT: scf.yield %[[THENRES]] : f32
diff --git a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
index d11b4d0625e3..959b2f8dabc1 100644
--- a/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
+++ b/mlir/test/Dialect/SCF/parallel-loop-specialization.mlir
@@ -29,9 +29,9 @@ func @parallel_loop(%outer_i0: index, %outer_i1: index, %A: memref<?x?xf32>, %B:
// CHECK: [[VAL_10:%.*]] = affine.min #map0(){{\[}}[[VAL_8]], [[VAL_0]]]
// CHECK: [[VAL_11:%.*]] = affine.min #map1(){{\[}}[[VAL_9]], [[VAL_1]]]
// CHECK: [[VAL_12:%.*]] = constant 1024 : index
-// CHECK: [[VAL_13:%.*]] = cmpi "eq", [[VAL_10]], [[VAL_12]] : index
+// CHECK: [[VAL_13:%.*]] = cmpi eq, [[VAL_10]], [[VAL_12]] : index
// CHECK: [[VAL_14:%.*]] = constant 64 : index
-// CHECK: [[VAL_15:%.*]] = cmpi "eq", [[VAL_11]], [[VAL_14]] : index
+// CHECK: [[VAL_15:%.*]] = cmpi eq, [[VAL_11]], [[VAL_14]] : index
// CHECK: [[VAL_16:%.*]] = and [[VAL_13]], [[VAL_15]] : i1
// CHECK: scf.if [[VAL_16]] {
// CHECK: scf.parallel ([[VAL_17:%.*]], [[VAL_18:%.*]]) = ([[VAL_6]], [[VAL_6]]) to ([[VAL_12]], [[VAL_14]]) step ([[VAL_7]], [[VAL_7]]) {
diff --git a/mlir/test/Dialect/SPIRV/IR/availability.mlir b/mlir/test/Dialect/SPIRV/IR/availability.mlir
index 322cc533c826..2ce14cbededd 100644
--- a/mlir/test/Dialect/SPIRV/IR/availability.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/availability.mlir
@@ -26,7 +26,7 @@ func @subgroup_ballot(%predicate: i1) -> vector<4xi32> {
// CHECK: max version: v1.5
// CHECK: extensions: [ ]
// CHECK: capabilities: [ [GroupNonUniformBallot] ]
- %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32>
+ %0 = spv.GroupNonUniformBallot Workgroup %predicate : vector<4xi32>
return %0: vector<4xi32>
}
diff --git a/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir b/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
index bd16d29add81..1a9240e3b23f 100644
--- a/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
@@ -5,16 +5,16 @@
//===----------------------------------------------------------------------===//
func @control_barrier_0() -> () {
- // CHECK: spv.ControlBarrier "Workgroup", "Device", "Acquire|UniformMemory"
- spv.ControlBarrier "Workgroup", "Device", "Acquire|UniformMemory"
+ // CHECK: spv.ControlBarrier Workgroup, Device, "Acquire|UniformMemory"
+ spv.ControlBarrier Workgroup, Device, "Acquire|UniformMemory"
return
}
// -----
func @control_barrier_1() -> () {
- // expected-error @+1 {{invalid execution_scope attribute specification: "Something"}}
- spv.ControlBarrier "Something", "Device", "Acquire|UniformMemory"
+ // expected-error @+1 {{expected string or keyword containing one of the following enum values}}
+ spv.ControlBarrier Something, Device, "Acquire|UniformMemory"
return
}
@@ -26,16 +26,16 @@ func @control_barrier_1() -> () {
//===----------------------------------------------------------------------===//
func @memory_barrier_0() -> () {
- // CHECK: spv.MemoryBarrier "Device", "Acquire|UniformMemory"
- spv.MemoryBarrier "Device", "Acquire|UniformMemory"
+ // CHECK: spv.MemoryBarrier Device, "Acquire|UniformMemory"
+ spv.MemoryBarrier Device, "Acquire|UniformMemory"
return
}
// -----
func @memory_barrier_1() -> () {
- // CHECK: spv.MemoryBarrier "Workgroup", "Acquire"
- spv.MemoryBarrier "Workgroup", "Acquire"
+ // CHECK: spv.MemoryBarrier Workgroup, Acquire
+ spv.MemoryBarrier Workgroup, Acquire
return
}
@@ -43,7 +43,7 @@ func @memory_barrier_1() -> () {
func @memory_barrier_2() -> () {
// expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
- spv.MemoryBarrier "Device", "Acquire|Release"
+ spv.MemoryBarrier Device, "Acquire|Release"
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
index c71cc3602b2e..d46aaa235d53 100644
--- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
@@ -17,24 +17,24 @@ func @subgroup_ballot(%predicate: i1) -> vector<4xi32> {
//===----------------------------------------------------------------------===//
func @group_broadcast_scalar(%value: f32, %localid: i32 ) -> f32 {
- // CHECK: spv.GroupBroadcast "Workgroup" %{{.*}}, %{{.*}} : f32, i32
- %0 = spv.GroupBroadcast "Workgroup" %value, %localid : f32, i32
+ // CHECK: spv.GroupBroadcast Workgroup %{{.*}}, %{{.*}} : f32, i32
+ %0 = spv.GroupBroadcast Workgroup %value, %localid : f32, i32
return %0: f32
}
// -----
func @group_broadcast_scalar_vector(%value: f32, %localid: vector<3xi32> ) -> f32 {
- // CHECK: spv.GroupBroadcast "Workgroup" %{{.*}}, %{{.*}} : f32, vector<3xi32>
- %0 = spv.GroupBroadcast "Workgroup" %value, %localid : f32, vector<3xi32>
+ // CHECK: spv.GroupBroadcast Workgroup %{{.*}}, %{{.*}} : f32, vector<3xi32>
+ %0 = spv.GroupBroadcast Workgroup %value, %localid : f32, vector<3xi32>
return %0: f32
}
// -----
func @group_broadcast_vector(%value: vector<4xf32>, %localid: vector<3xi32> ) -> vector<4xf32> {
- // CHECK: spv.GroupBroadcast "Subgroup" %{{.*}}, %{{.*}} : vector<4xf32>, vector<3xi32>
- %0 = spv.GroupBroadcast "Subgroup" %value, %localid : vector<4xf32>, vector<3xi32>
+ // CHECK: spv.GroupBroadcast Subgroup %{{.*}}, %{{.*}} : vector<4xf32>, vector<3xi32>
+ %0 = spv.GroupBroadcast Subgroup %value, %localid : vector<4xf32>, vector<3xi32>
return %0: vector<4xf32>
}
@@ -42,7 +42,7 @@ func @group_broadcast_vector(%value: vector<4xf32>, %localid: vector<3xi32> ) ->
func @group_broadcast_negative_scope(%value: f32, %localid: vector<3xi32> ) -> f32 {
// expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
- %0 = spv.GroupBroadcast "Device" %value, %localid : f32, vector<3xi32>
+ %0 = spv.GroupBroadcast Device %value, %localid : f32, vector<3xi32>
return %0: f32
}
@@ -50,7 +50,7 @@ func @group_broadcast_negative_scope(%value: f32, %localid: vector<3xi32> ) -> f
func @group_broadcast_negative_locid_dtype(%value: f32, %localid: vector<3xf32> ) -> f32 {
// expected-error @+1 {{operand #1 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values}}
- %0 = spv.GroupBroadcast "Subgroup" %value, %localid : f32, vector<3xf32>
+ %0 = spv.GroupBroadcast Subgroup %value, %localid : f32, vector<3xf32>
return %0: f32
}
@@ -58,7 +58,7 @@ func @group_broadcast_negative_locid_dtype(%value: f32, %localid: vector<3xf32>
func @group_broadcast_negative_locid_vec4(%value: f32, %localid: vector<4xi32> ) -> f32 {
// expected-error @+1 {{localid is a vector and can be with only 2 or 3 components, actual number is 4}}
- %0 = spv.GroupBroadcast "Subgroup" %value, %localid : f32, vector<4xi32>
+ %0 = spv.GroupBroadcast Subgroup %value, %localid : f32, vector<4xi32>
return %0: f32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index 5839ee7c5627..61febd37460a 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -5,8 +5,8 @@
//===----------------------------------------------------------------------===//
func @group_non_uniform_ballot(%predicate: i1) -> vector<4xi32> {
- // CHECK: %{{.*}} = spv.GroupNonUniformBallot "Workgroup" %{{.*}}: vector<4xi32>
- %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32>
+ // CHECK: %{{.*}} = spv.GroupNonUniformBallot Workgroup %{{.*}}: vector<4xi32>
+ %0 = spv.GroupNonUniformBallot Workgroup %predicate : vector<4xi32>
return %0: vector<4xi32>
}
@@ -14,7 +14,7 @@ func @group_non_uniform_ballot(%predicate: i1) -> vector<4xi32> {
func @group_non_uniform_ballot(%predicate: i1) -> vector<4xi32> {
// expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
- %0 = spv.GroupNonUniformBallot "Device" %predicate : vector<4xi32>
+ %0 = spv.GroupNonUniformBallot Device %predicate : vector<4xi32>
return %0: vector<4xi32>
}
@@ -22,7 +22,7 @@ func @group_non_uniform_ballot(%predicate: i1) -> vector<4xi32> {
func @group_non_uniform_ballot(%predicate: i1) -> vector<4xsi32> {
// expected-error @+1 {{op result #0 must be vector of 8/16/32/64-bit signless/unsigned integer values of length 4, but got 'vector<4xsi32>'}}
- %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xsi32>
+ %0 = spv.GroupNonUniformBallot Workgroup %predicate : vector<4xsi32>
return %0: vector<4xsi32>
}
@@ -34,8 +34,8 @@ func @group_non_uniform_ballot(%predicate: i1) -> vector<4xsi32> {
func @group_non_uniform_broadcast_scalar(%value: f32) -> f32 {
%one = spv.constant 1 : i32
- // CHECK: spv.GroupNonUniformBroadcast "Workgroup" %{{.*}}, %{{.*}} : f32, i32
- %0 = spv.GroupNonUniformBroadcast "Workgroup" %value, %one : f32, i32
+ // CHECK: spv.GroupNonUniformBroadcast Workgroup %{{.*}}, %{{.*}} : f32, i32
+ %0 = spv.GroupNonUniformBroadcast Workgroup %value, %one : f32, i32
return %0: f32
}
@@ -43,8 +43,8 @@ func @group_non_uniform_broadcast_scalar(%value: f32) -> f32 {
func @group_non_uniform_broadcast_vector(%value: vector<4xf32>) -> vector<4xf32> {
%one = spv.constant 1 : i32
- // CHECK: spv.GroupNonUniformBroadcast "Subgroup" %{{.*}}, %{{.*}} : vector<4xf32>, i32
- %0 = spv.GroupNonUniformBroadcast "Subgroup" %value, %one : vector<4xf32>, i32
+ // CHECK: spv.GroupNonUniformBroadcast Subgroup %{{.*}}, %{{.*}} : vector<4xf32>, i32
+ %0 = spv.GroupNonUniformBroadcast Subgroup %value, %one : vector<4xf32>, i32
return %0: vector<4xf32>
}
@@ -53,7 +53,7 @@ func @group_non_uniform_broadcast_vector(%value: vector<4xf32>) -> vector<4xf32>
func @group_non_uniform_broadcast_negative_scope(%value: f32, %localid: i32 ) -> f32 {
%one = spv.constant 1 : i32
// expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
- %0 = spv.GroupNonUniformBroadcast "Device" %value, %one : f32, i32
+ %0 = spv.GroupNonUniformBroadcast Device %value, %one : f32, i32
return %0: f32
}
@@ -61,7 +61,7 @@ func @group_non_uniform_broadcast_negative_scope(%value: f32, %localid: i32 ) ->
func @group_non_uniform_broadcast_negative_non_const(%value: f32, %localid: i32) -> f32 {
// expected-error @+1 {{id must be the result of a constant op}}
- %0 = spv.GroupNonUniformBroadcast "Subgroup" %value, %localid : f32, i32
+ %0 = spv.GroupNonUniformBroadcast Subgroup %value, %localid : f32, i32
return %0: f32
}
@@ -73,8 +73,8 @@ func @group_non_uniform_broadcast_negative_non_const(%value: f32, %localid: i32)
// CHECK-LABEL: @group_non_uniform_elect
func @group_non_uniform_elect() -> i1 {
- // CHECK: %{{.+}} = spv.GroupNonUniformElect "Workgroup" : i1
- %0 = spv.GroupNonUniformElect "Workgroup" : i1
+ // CHECK: %{{.+}} = spv.GroupNonUniformElect Workgroup : i1
+ %0 = spv.GroupNonUniformElect Workgroup : i1
return %0: i1
}
@@ -82,7 +82,7 @@ func @group_non_uniform_elect() -> i1 {
func @group_non_uniform_elect() -> i1 {
// expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
- %0 = spv.GroupNonUniformElect "CrossDevice" : i1
+ %0 = spv.GroupNonUniformElect CrossDevice : i1
return %0: i1
}
diff --git a/mlir/test/Dialect/SPIRV/IR/target-env.mlir b/mlir/test/Dialect/SPIRV/IR/target-env.mlir
index c0bc02fae089..e58c9f1c75bc 100644
--- a/mlir/test/Dialect/SPIRV/IR/target-env.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/target-env.mlir
@@ -59,7 +59,7 @@ func @cmp_exchange_weak_unsupported_version(%ptr: !spv.ptr<i32, Workgroup>, %val
func @group_non_uniform_ballot_suitable_version(%predicate: i1) -> vector<4xi32> attributes {
spv.target_env = #spv.target_env<#spv.vce<v1.4, [GroupNonUniformBallot], []>, {}>
} {
- // CHECK: spv.GroupNonUniformBallot "Workgroup"
+ // CHECK: spv.GroupNonUniformBallot Workgroup
%0 = "test.convert_to_group_non_uniform_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
return %0: vector<4xi32>
}
diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
index 07d2d05aa741..3dcb206f3563 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
@@ -27,7 +27,7 @@ spv.module Logical GLSL450 attributes {
#spv.vce<v1.5, [Shader, GroupNonUniformBallot], []>, {}>
} {
spv.func @group_non_uniform_ballot(%predicate : i1) -> vector<4xi32> "None" {
- %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32>
+ %0 = spv.GroupNonUniformBallot Workgroup %predicate : vector<4xi32>
spv.ReturnValue %0: vector<4xi32>
}
}
diff --git a/mlir/test/Dialect/Standard/canonicalize.mlir b/mlir/test/Dialect/Standard/canonicalize.mlir
index f3b7ccddd1ff..e7e4d4f49222 100644
--- a/mlir/test/Dialect/Standard/canonicalize.mlir
+++ b/mlir/test/Dialect/Standard/canonicalize.mlir
@@ -82,16 +82,16 @@ func @dim_of_dynamic_tensor_from_elements(%arg0: index, %arg1: index) -> index {
// CHECK-SAME: %[[F]], %[[F]], %[[F]], %[[F]], %[[F]]
func @cmpi_equal_operands(%arg0: i64)
-> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1) {
- %0 = cmpi "eq", %arg0, %arg0 : i64
- %1 = cmpi "sle", %arg0, %arg0 : i64
- %2 = cmpi "sge", %arg0, %arg0 : i64
- %3 = cmpi "ule", %arg0, %arg0 : i64
- %4 = cmpi "uge", %arg0, %arg0 : i64
- %5 = cmpi "ne", %arg0, %arg0 : i64
- %6 = cmpi "slt", %arg0, %arg0 : i64
- %7 = cmpi "sgt", %arg0, %arg0 : i64
- %8 = cmpi "ult", %arg0, %arg0 : i64
- %9 = cmpi "ugt", %arg0, %arg0 : i64
+ %0 = cmpi eq, %arg0, %arg0 : i64
+ %1 = cmpi sle, %arg0, %arg0 : i64
+ %2 = cmpi sge, %arg0, %arg0 : i64
+ %3 = cmpi ule, %arg0, %arg0 : i64
+ %4 = cmpi uge, %arg0, %arg0 : i64
+ %5 = cmpi ne, %arg0, %arg0 : i64
+ %6 = cmpi slt, %arg0, %arg0 : i64
+ %7 = cmpi sgt, %arg0, %arg0 : i64
+ %8 = cmpi ult, %arg0, %arg0 : i64
+ %9 = cmpi ugt, %arg0, %arg0 : i64
return %0, %1, %2, %3, %4, %5, %6, %7, %8, %9
: i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
}
diff --git a/mlir/test/Dialect/Standard/expand-ops.mlir b/mlir/test/Dialect/Standard/expand-ops.mlir
index ddc41faad29c..b28c73ea469a 100644
--- a/mlir/test/Dialect/Standard/expand-ops.mlir
+++ b/mlir/test/Dialect/Standard/expand-ops.mlir
@@ -3,12 +3,12 @@
// CHECK-LABEL: func @atomic_rmw_to_generic
// CHECK-SAME: ([[F:%.*]]: memref<10xf32>, [[f:%.*]]: f32, [[i:%.*]]: index)
func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
- %x = atomic_rmw "maxf" %f, %F[%i] : (f32, memref<10xf32>) -> f32
+ %x = atomic_rmw maxf %f, %F[%i] : (f32, memref<10xf32>) -> f32
return %x : f32
}
// CHECK: %0 = std.generic_atomic_rmw %arg0[%arg2] : memref<10xf32> {
// CHECK: ^bb0([[CUR_VAL:%.*]]: f32):
-// CHECK: [[CMP:%.*]] = cmpf "ogt", [[CUR_VAL]], [[f]] : f32
+// CHECK: [[CMP:%.*]] = cmpf ogt, [[CUR_VAL]], [[f]] : f32
// CHECK: [[SELECT:%.*]] = select [[CMP]], [[CUR_VAL]], [[f]] : f32
// CHECK: atomic_yield [[SELECT]] : f32
// CHECK: }
@@ -18,7 +18,7 @@ func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
// CHECK-LABEL: func @atomic_rmw_no_conversion
func @atomic_rmw_no_conversion(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
- %x = atomic_rmw "addf" %f, %F[%i] : (f32, memref<10xf32>) -> f32
+ %x = atomic_rmw addf %f, %F[%i] : (f32, memref<10xf32>) -> f32
return %x : f32
}
// CHECK-NOT: generic_atomic_rmw
@@ -35,7 +35,7 @@ func @ceildivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[ONE:%.+]] = constant 1 : i32
// CHECK: [[ZERO:%.+]] = constant 0 : i32
// CHECK: [[MINONE:%.+]] = constant -1 : i32
-// CHECK: [[CMP1:%.+]] = cmpi "sgt", [[ARG1]], [[ZERO]] : i32
+// CHECK: [[CMP1:%.+]] = cmpi sgt, [[ARG1]], [[ZERO]] : i32
// CHECK: [[X:%.+]] = select [[CMP1]], [[MINONE]], [[ONE]] : i32
// CHECK: [[TRUE1:%.+]] = addi [[X]], [[ARG0]] : i32
// CHECK: [[TRUE2:%.+]] = divi_signed [[TRUE1]], [[ARG1]] : i32
@@ -43,10 +43,10 @@ func @ceildivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[FALSE1:%.+]] = subi [[ZERO]], [[ARG0]] : i32
// CHECK: [[FALSE2:%.+]] = divi_signed [[FALSE1]], [[ARG1]] : i32
// CHECK: [[FALSE3:%.+]] = subi [[ZERO]], [[FALSE2]] : i32
-// CHECK: [[NNEG:%.+]] = cmpi "slt", [[ARG0]], [[ZERO]] : i32
-// CHECK: [[NPOS:%.+]] = cmpi "sgt", [[ARG0]], [[ZERO]] : i32
-// CHECK: [[MNEG:%.+]] = cmpi "slt", [[ARG1]], [[ZERO]] : i32
-// CHECK: [[MPOS:%.+]] = cmpi "sgt", [[ARG1]], [[ZERO]] : i32
+// CHECK: [[NNEG:%.+]] = cmpi slt, [[ARG0]], [[ZERO]] : i32
+// CHECK: [[NPOS:%.+]] = cmpi sgt, [[ARG0]], [[ZERO]] : i32
+// CHECK: [[MNEG:%.+]] = cmpi slt, [[ARG1]], [[ZERO]] : i32
+// CHECK: [[MPOS:%.+]] = cmpi sgt, [[ARG1]], [[ZERO]] : i32
// CHECK: [[TERM1:%.+]] = and [[NNEG]], [[MNEG]] : i1
// CHECK: [[TERM2:%.+]] = and [[NPOS]], [[MPOS]] : i1
// CHECK: [[CMP2:%.+]] = or [[TERM1]], [[TERM2]] : i1
@@ -64,16 +64,16 @@ func @floordivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[ONE:%.+]] = constant 1 : i32
// CHECK: [[ZERO:%.+]] = constant 0 : i32
// CHECK: [[MIN1:%.+]] = constant -1 : i32
-// CHECK: [[CMP1:%.+]] = cmpi "slt", [[ARG1]], [[ZERO]] : i32
+// CHECK: [[CMP1:%.+]] = cmpi slt, [[ARG1]], [[ZERO]] : i32
// CHECK: [[X:%.+]] = select [[CMP1]], [[ONE]], [[MIN1]] : i32
// CHECK: [[TRUE1:%.+]] = subi [[X]], [[ARG0]] : i32
// CHECK: [[TRUE2:%.+]] = divi_signed [[TRUE1]], [[ARG1]] : i32
// CHECK: [[TRUE3:%.+]] = subi [[MIN1]], [[TRUE2]] : i32
// CHECK: [[FALSE:%.+]] = divi_signed [[ARG0]], [[ARG1]] : i32
-// CHECK: [[NNEG:%.+]] = cmpi "slt", [[ARG0]], [[ZERO]] : i32
-// CHECK: [[NPOS:%.+]] = cmpi "sgt", [[ARG0]], [[ZERO]] : i32
-// CHECK: [[MNEG:%.+]] = cmpi "slt", [[ARG1]], [[ZERO]] : i32
-// CHECK: [[MPOS:%.+]] = cmpi "sgt", [[ARG1]], [[ZERO]] : i32
+// CHECK: [[NNEG:%.+]] = cmpi slt, [[ARG0]], [[ZERO]] : i32
+// CHECK: [[NPOS:%.+]] = cmpi sgt, [[ARG0]], [[ZERO]] : i32
+// CHECK: [[MNEG:%.+]] = cmpi slt, [[ARG1]], [[ZERO]] : i32
+// CHECK: [[MPOS:%.+]] = cmpi sgt, [[ARG1]], [[ZERO]] : i32
// CHECK: [[TERM1:%.+]] = and [[NNEG]], [[MPOS]] : i1
// CHECK: [[TERM2:%.+]] = and [[NPOS]], [[MNEG]] : i1
// CHECK: [[CMP2:%.+]] = or [[TERM1]], [[TERM2]] : i1
diff --git a/mlir/test/Dialect/Standard/expand-tanh.mlir b/mlir/test/Dialect/Standard/expand-tanh.mlir
index 557d1d0a808a..0c9bf0be3cf7 100644
--- a/mlir/test/Dialect/Standard/expand-tanh.mlir
+++ b/mlir/test/Dialect/Standard/expand-tanh.mlir
@@ -18,6 +18,6 @@ func @tanh(%arg: f32) -> f32 {
// CHECK: %[[DIVIDEND2:.+]] = subf %[[EXP2]], %[[ONE]] : f32
// CHECK: %[[DIVISOR2:.+]] = addf %[[EXP2]], %[[ONE]] : f32
// CHECK: %[[RES2:.+]] = divf %[[DIVIDEND2]], %[[DIVISOR2]] : f32
-// CHECK: %[[COND:.+]] = cmpf "oge", %arg0, %[[ZERO]] : f32
+// CHECK: %[[COND:.+]] = cmpf oge, %arg0, %[[ZERO]] : f32
// CHECK: %[[RESULT:.+]] = select %[[COND]], %[[RES1]], %[[RES2]] : f32
// CHECK: return %[[RESULT]]
diff --git a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
index aaaa7adf6472..2c3ac0fe97bb 100644
--- a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
@@ -803,10 +803,10 @@ func @genbool_var_1d(%arg0: index) -> vector<3xi1> {
// CHECK: %[[c0:.*]] = constant 0 : index
// CHECK: %[[c1:.*]] = constant 1 : index
// CHECK: %[[T0:.*]] = vector.create_mask %[[B]] : vector<3xi1>
-// CHECK: %[[T1:.*]] = cmpi "slt", %[[c0]], %[[A]] : index
+// CHECK: %[[T1:.*]] = cmpi slt, %[[c0]], %[[A]] : index
// CHECK: %[[T2:.*]] = select %[[T1]], %[[T0]], %[[C1]] : vector<3xi1>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C2]] [0] : vector<3xi1> into vector<2x3xi1>
-// CHECK: %[[T4:.*]] = cmpi "slt", %[[c1]], %[[A]] : index
+// CHECK: %[[T4:.*]] = cmpi slt, %[[c1]], %[[A]] : index
// CHECK: %[[T5:.*]] = select %[[T4]], %[[T0]], %[[C1]] : vector<3xi1>
// CHECK: %[[T6:.*]] = vector.insert %[[T5]], %[[T3]] [1] : vector<3xi1> into vector<2x3xi1>
// CHECK: return %[[T6]] : vector<2x3xi1>
@@ -826,13 +826,13 @@ func @genbool_var_2d(%arg0: index, %arg1: index) -> vector<2x3xi1> {
// CHECK: %[[c0:.*]] = constant 0 : index
// CHECK: %[[c1:.*]] = constant 1 : index
// CHECK: %[[T0:.*]] = vector.create_mask %[[C]] : vector<7xi1>
-// CHECK: %[[T1:.*]] = cmpi "slt", %[[c0]], %[[B]] : index
+// CHECK: %[[T1:.*]] = cmpi slt, %[[c0]], %[[B]] : index
// CHECK: %[[T2:.*]] = select %[[T1]], %[[T0]], %[[C1]] : vector<7xi1>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C2]] [0] : vector<7xi1> into vector<1x7xi1>
-// CHECK: %[[T4:.*]] = cmpi "slt", %[[c0]], %[[A]] : index
+// CHECK: %[[T4:.*]] = cmpi slt, %[[c0]], %[[A]] : index
// CHECK: %[[T5:.*]] = select %[[T4]], %[[T3]], %[[C2]] : vector<1x7xi1>
// CHECK: %[[T6:.*]] = vector.insert %[[T5]], %[[C3]] [0] : vector<1x7xi1> into vector<2x1x7xi1>
-// CHECK: %[[T7:.*]] = cmpi "slt", %[[c1]], %[[A]] : index
+// CHECK: %[[T7:.*]] = cmpi slt, %[[c1]], %[[A]] : index
// CHECK: %[[T8:.*]] = select %[[T7]], %[[T3]], %[[C2]] : vector<1x7xi1>
// CHECK: %[[T9:.*]] = vector.insert %[[T8]], %[[T6]] [1] : vector<1x7xi1> into vector<2x1x7xi1>
// CHECK: return %[[T9]] : vector<2x1x7xi1>
diff --git a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
index e36454203982..a1552f11b64f 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-full-partial-split.mlir
@@ -33,10 +33,10 @@ func @split_vector_transfer_read_2d(%A: memref<?x8xf32>, %i: index, %j: index) -
// %i + 4 <= dim(%A, 0)
// CHECK: %[[idx0:.*]] = affine.apply #[[$map_p4]]()[%[[i]]]
// CHECK: %[[d0:.*]] = dim %[[A]], %[[c0]] : memref<?x8xf32>
- // CHECK: %[[cmp0:.*]] = cmpi "sle", %[[idx0]], %[[d0]] : index
+ // CHECK: %[[cmp0:.*]] = cmpi sle, %[[idx0]], %[[d0]] : index
// %j + 8 <= dim(%A, 1)
// CHECK: %[[idx1:.*]] = affine.apply #[[$map_p8]]()[%[[j]]]
- // CHECK: %[[cmp1:.*]] = cmpi "sle", %[[idx1]], %[[c8]] : index
+ // CHECK: %[[cmp1:.*]] = cmpi sle, %[[idx1]], %[[c8]] : index
// are both conds true
// CHECK: %[[cond:.*]] = and %[[cmp0]], %[[cmp1]] : i1
// CHECK: %[[ifres:.*]]:3 = scf.if %[[cond]] -> (memref<?x8xf32>, index, index) {
@@ -67,10 +67,10 @@ func @split_vector_transfer_read_2d(%A: memref<?x8xf32>, %i: index, %j: index) -
// %i + 4 <= dim(%A, 0)
// LINALG: %[[idx0:.*]] = affine.apply #[[$map_p4]]()[%[[i]]]
// LINALG: %[[d0:.*]] = dim %[[A]], %[[c0]] : memref<?x8xf32>
- // LINALG: %[[cmp0:.*]] = cmpi "sle", %[[idx0]], %[[d0]] : index
+ // LINALG: %[[cmp0:.*]] = cmpi sle, %[[idx0]], %[[d0]] : index
// %j + 8 <= dim(%A, 1)
// LINALG: %[[idx1:.*]] = affine.apply #[[$map_p8]]()[%[[j]]]
- // LINALG: %[[cmp1:.*]] = cmpi "sle", %[[idx1]], %[[c8]] : index
+ // LINALG: %[[cmp1:.*]] = cmpi sle, %[[idx1]], %[[c8]] : index
// are both conds true
// LINALG: %[[cond:.*]] = and %[[cmp0]], %[[cmp1]] : i1
// LINALG: %[[ifres:.*]]:3 = scf.if %[[cond]] -> (memref<?x8xf32>, index, index) {
@@ -121,10 +121,10 @@ func @split_vector_transfer_read_strided_2d(
// CHECK: %[[alloc:.*]] = alloca() {alignment = 32 : i64} : memref<4x8xf32>
// %i + 4 <= dim(%A, 0)
// CHECK: %[[idx0:.*]] = affine.apply #[[$map_p4]]()[%[[i]]]
- // CHECK: %[[cmp0:.*]] = cmpi "sle", %[[idx0]], %[[c7]] : index
+ // CHECK: %[[cmp0:.*]] = cmpi sle, %[[idx0]], %[[c7]] : index
// %j + 8 <= dim(%A, 1)
// CHECK: %[[idx1:.*]] = affine.apply #[[$map_p8]]()[%[[j]]]
- // CHECK: %[[cmp1:.*]] = cmpi "sle", %[[idx1]], %[[c8]] : index
+ // CHECK: %[[cmp1:.*]] = cmpi sle, %[[idx1]], %[[c8]] : index
// are both conds true
// CHECK: %[[cond:.*]] = and %[[cmp0]], %[[cmp1]] : i1
// CHECK: %[[ifres:.*]]:3 = scf.if %[[cond]] -> (memref<?x8xf32, #[[$map_2d_stride_1]]>, index, index) {
@@ -159,10 +159,10 @@ func @split_vector_transfer_read_strided_2d(
// LINALG: %[[alloc:.*]] = alloca() {alignment = 32 : i64} : memref<4x8xf32>
// %i + 4 <= dim(%A, 0)
// LINALG: %[[idx0:.*]] = affine.apply #[[$map_p4]]()[%[[i]]]
- // LINALG: %[[cmp0:.*]] = cmpi "sle", %[[idx0]], %[[c7]] : index
+ // LINALG: %[[cmp0:.*]] = cmpi sle, %[[idx0]], %[[c7]] : index
// %j + 8 <= dim(%A, 1)
// LINALG: %[[idx1:.*]] = affine.apply #[[$map_p8]]()[%[[j]]]
- // LINALG: %[[cmp1:.*]] = cmpi "sle", %[[idx1]], %[[c8]] : index
+ // LINALG: %[[cmp1:.*]] = cmpi sle, %[[idx1]], %[[c8]] : index
// are both conds true
// LINALG: %[[cond:.*]] = and %[[cmp0]], %[[cmp1]] : i1
// LINALG: %[[ifres:.*]]:3 = scf.if %[[cond]] -> (memref<?x8xf32, #[[$map_2d_stride_1]]>, index, index) {
diff --git a/mlir/test/Dialect/Vector/vector-transforms.mlir b/mlir/test/Dialect/Vector/vector-transforms.mlir
index 754c7cc04088..e1c79de4efae 100644
--- a/mlir/test/Dialect/Vector/vector-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-transforms.mlir
@@ -526,10 +526,10 @@ func @shape_cast_fold(%arg0 : vector<5x4x2xf32>, %arg1 : vector<3x4x2xf32>)
// CHECK: %[[VT5:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C2]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT6:.*]] = vector.transfer_read %[[ARG1]][%[[C2]], %[[C0]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT7:.*]] = vector.transfer_read %[[ARG1]][%[[C2]], %[[C2]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
-// CHECK: %[[CMP0:.*]] = cmpf "ult", %[[VT0]], %[[VT4]] : vector<2x2xf32>
-// CHECK: %[[CMP1:.*]] = cmpf "ult", %[[VT1]], %[[VT5]] : vector<2x2xf32>
-// CHECK: %[[CMP2:.*]] = cmpf "ult", %[[VT2]], %[[VT6]] : vector<2x2xf32>
-// CHECK: %[[CMP3:.*]] = cmpf "ult", %[[VT3]], %[[VT7]] : vector<2x2xf32>
+// CHECK: %[[CMP0:.*]] = cmpf ult, %[[VT0]], %[[VT4]] : vector<2x2xf32>
+// CHECK: %[[CMP1:.*]] = cmpf ult, %[[VT1]], %[[VT5]] : vector<2x2xf32>
+// CHECK: %[[CMP2:.*]] = cmpf ult, %[[VT2]], %[[VT6]] : vector<2x2xf32>
+// CHECK: %[[CMP3:.*]] = cmpf ult, %[[VT3]], %[[VT7]] : vector<2x2xf32>
// CHECK: %[[VT0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C2]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT2:.*]] = vector.transfer_read %[[ARG0]][%[[C2]], %[[C0]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
@@ -551,7 +551,7 @@ func @elementwise_unroll(%arg0 : memref<4x4xf32>, %arg1 : memref<4x4xf32>) {
%cf0 = constant 0.0 : f32
%0 = vector.transfer_read %arg0[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
%1 = vector.transfer_read %arg1[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
- %cond = cmpf "ult", %0, %1 : vector<4x4xf32>
+ %cond = cmpf ult, %0, %1 : vector<4x4xf32>
// Vector transfer split pattern only support single user right now.
%2 = vector.transfer_read %arg0[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
%3 = vector.transfer_read %arg1[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
diff --git a/mlir/test/EDSC/builder-api-test.cpp b/mlir/test/EDSC/builder-api-test.cpp
index db200ba5f90f..241cc1a5ebad 100644
--- a/mlir/test/EDSC/builder-api-test.cpp
+++ b/mlir/test/EDSC/builder-api-test.cpp
@@ -565,43 +565,43 @@ TEST_FUNC(select_op_i32) {
// CHECK-LABEL: @select_op
// CHECK: affine.for %{{.*}} = 0 to 1 {
// CHECK-NEXT: affine.for %{{.*}} = 0 to 1 {
- // CHECK-DAG: {{.*}} = cmpi "eq"
+ // CHECK-DAG: {{.*}} = cmpi eq
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "ne"
+ // CHECK-DAG: {{.*}} = cmpi ne
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "slt"
+ // CHECK-DAG: {{.*}} = cmpi slt
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "sle"
+ // CHECK-DAG: {{.*}} = cmpi sle
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "sgt"
+ // CHECK-DAG: {{.*}} = cmpi sgt
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "sge"
+ // CHECK-DAG: {{.*}} = cmpi sge
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "ult"
+ // CHECK-DAG: {{.*}} = cmpi ult
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "ule"
+ // CHECK-DAG: {{.*}} = cmpi ule
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "ugt"
+ // CHECK-DAG: {{.*}} = cmpi ugt
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
- // CHECK-DAG: {{.*}} = cmpi "uge"
+ // CHECK-DAG: {{.*}} = cmpi uge
// CHECK-DAG: {{.*}} = affine.load
// CHECK-DAG: {{.*}} = affine.load
// CHECK-NEXT: {{.*}} = select
@@ -641,70 +641,70 @@ TEST_FUNC(select_op_f32) {
// CHECK-LABEL: @select_op
// CHECK: affine.for %{{.*}} = 0 to 1 {
// CHECK-NEXT: affine.for %{{.*}} = 0 to 1 {
- // CHECK-DAG: cmpf "oeq"
+ // CHECK-DAG: cmpf oeq
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "one"
+ // CHECK-DAG: cmpf one
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "oge"
+ // CHECK-DAG: cmpf oge
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "ole"
+ // CHECK-DAG: cmpf ole
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "olt"
+ // CHECK-DAG: cmpf olt
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "ogt"
+ // CHECK-DAG: cmpf ogt
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "oge"
+ // CHECK-DAG: cmpf oge
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "ole"
+ // CHECK-DAG: cmpf ole
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "olt"
+ // CHECK-DAG: cmpf olt
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.apply
// CHECK-NEXT: select
- // CHECK-DAG: cmpf "ogt"
+ // CHECK-DAG: cmpf ogt
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
// CHECK-DAG: affine.load
@@ -903,7 +903,7 @@ TEST_FUNC(affine_if_op) {
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK-SAME: ins({{.*}}memref<?x?xf32>, memref<?x?xf32>)
// CHECK-SAME: outs({{.*}}memref<?x?xf32>)
-// CHECK: cmpf "ogt"
+// CHECK: cmpf ogt
// CHECK: select
// CHECK: linalg.generic {
// CHECK-SAME: indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
@@ -1078,7 +1078,7 @@ TEST_FUNC(linalg_metadata_ops) {
// CHECK-SAME: indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d0, d1)>],
// CHECK-SAME: iterator_types = ["parallel", "parallel"]}
// CHECK-SAME: ins(%{{[a-z0-9]*}}, %{{[a-z0-9]*}} : tensor<?x?xf32>, tensor<?x?xf32>)
-// CHECK: cmpf "ogt"
+// CHECK: cmpf ogt
// CHECK: select
// CHECK: } -> tensor<?x?xf32>
// CHECK: linalg.generic {
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 396211c10430..1deeb3ec49d0 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -137,27 +137,27 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
// CHECK: %cst_5 = constant dense<0> : vector<42xi32>
%vci32 = constant dense<0> : vector<42 x i32>
- // CHECK: %{{[0-9]+}} = cmpi "eq", %{{[0-9]+}}, %{{[0-9]+}} : i32
- %14 = cmpi "eq", %i3, %i4 : i32
+ // CHECK: %{{[0-9]+}} = cmpi eq, %{{[0-9]+}}, %{{[0-9]+}} : i32
+ %14 = cmpi eq, %i3, %i4 : i32
// Predicate 1 means inequality comparison.
- // CHECK: %{{[0-9]+}} = cmpi "ne", %{{[0-9]+}}, %{{[0-9]+}} : i32
+ // CHECK: %{{[0-9]+}} = cmpi ne, %{{[0-9]+}}, %{{[0-9]+}} : i32
%15 = "std.cmpi"(%i3, %i4) {predicate = 1} : (i32, i32) -> i1
- // CHECK: %{{[0-9]+}} = cmpi "slt", %cst_3, %cst_3 : vector<4xi32>
- %16 = cmpi "slt", %13, %13 : vector<4 x i32>
+ // CHECK: %{{[0-9]+}} = cmpi slt, %cst_3, %cst_3 : vector<4xi32>
+ %16 = cmpi slt, %13, %13 : vector<4 x i32>
- // CHECK: %{{[0-9]+}} = cmpi "ne", %cst_3, %cst_3 : vector<4xi32>
+ // CHECK: %{{[0-9]+}} = cmpi ne, %cst_3, %cst_3 : vector<4xi32>
%17 = "std.cmpi"(%13, %13) {predicate = 1} : (vector<4 x i32>, vector<4 x i32>) -> vector<4 x i1>
- // CHECK: %{{[0-9]+}} = cmpi "slt", %arg3, %arg3 : index
- %18 = cmpi "slt", %idx, %idx : index
+ // CHECK: %{{[0-9]+}} = cmpi slt, %arg3, %arg3 : index
+ %18 = cmpi slt, %idx, %idx : index
- // CHECK: %{{[0-9]+}} = cmpi "eq", %cst_4, %cst_4 : tensor<42xi32>
- %19 = cmpi "eq", %tci32, %tci32 : tensor<42 x i32>
+ // CHECK: %{{[0-9]+}} = cmpi eq, %cst_4, %cst_4 : tensor<42xi32>
+ %19 = cmpi eq, %tci32, %tci32 : tensor<42 x i32>
- // CHECK: %{{[0-9]+}} = cmpi "eq", %cst_5, %cst_5 : vector<42xi32>
- %20 = cmpi "eq", %vci32, %vci32 : vector<42 x i32>
+ // CHECK: %{{[0-9]+}} = cmpi eq, %cst_5, %cst_5 : vector<42xi32>
+ %20 = cmpi eq, %vci32, %vci32 : vector<42 x i32>
// CHECK: %{{[0-9]+}} = select %{{[0-9]+}}, %arg3, %arg3 : index
%21 = select %18, %idx, %idx : index
@@ -292,24 +292,24 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
%tcf32 = constant dense<0.> : tensor<42 x f32>
%vcf32 = constant dense<0.> : vector<4 x f32>
- // CHECK: %{{[0-9]+}} = cmpf "ogt", %{{[0-9]+}}, %{{[0-9]+}} : f32
- %65 = cmpf "ogt", %f3, %f4 : f32
+ // CHECK: %{{[0-9]+}} = cmpf ogt, %{{[0-9]+}}, %{{[0-9]+}} : f32
+ %65 = cmpf ogt, %f3, %f4 : f32
// Predicate 0 means ordered equality comparison.
- // CHECK: %{{[0-9]+}} = cmpf "oeq", %{{[0-9]+}}, %{{[0-9]+}} : f32
+ // CHECK: %{{[0-9]+}} = cmpf oeq, %{{[0-9]+}}, %{{[0-9]+}} : f32
%66 = "std.cmpf"(%f3, %f4) {predicate = 1} : (f32, f32) -> i1
- // CHECK: %{{[0-9]+}} = cmpf "olt", %cst_8, %cst_8 : vector<4xf32>
- %67 = cmpf "olt", %vcf32, %vcf32 : vector<4 x f32>
+ // CHECK: %{{[0-9]+}} = cmpf olt, %cst_8, %cst_8 : vector<4xf32>
+ %67 = cmpf olt, %vcf32, %vcf32 : vector<4 x f32>
- // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_8, %cst_8 : vector<4xf32>
+ // CHECK: %{{[0-9]+}} = cmpf oeq, %cst_8, %cst_8 : vector<4xf32>
%68 = "std.cmpf"(%vcf32, %vcf32) {predicate = 1} : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x i1>
- // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_7, %cst_7 : tensor<42xf32>
- %69 = cmpf "oeq", %tcf32, %tcf32 : tensor<42 x f32>
+ // CHECK: %{{[0-9]+}} = cmpf oeq, %cst_7, %cst_7 : tensor<42xf32>
+ %69 = cmpf oeq, %tcf32, %tcf32 : tensor<42 x f32>
- // CHECK: %{{[0-9]+}} = cmpf "oeq", %cst_8, %cst_8 : vector<4xf32>
- %70 = cmpf "oeq", %vcf32, %vcf32 : vector<4 x f32>
+ // CHECK: %{{[0-9]+}} = cmpf oeq, %cst_8, %cst_8 : vector<4xf32>
+ %70 = cmpf oeq, %vcf32, %vcf32 : vector<4 x f32>
// CHECK: %{{[0-9]+}} = rank %arg0 : tensor<4x4x?xf32>
%71 = "std.rank"(%t) : (tensor<4x4x?xf32>) -> index
@@ -875,8 +875,8 @@ func @unranked_tensor_load_store(%0 : memref<*xi32>) {
// CHECK-LABEL: func @atomic_rmw
// CHECK-SAME: ([[BUF:%.*]]: memref<10xf32>, [[VAL:%.*]]: f32, [[I:%.*]]: index)
func @atomic_rmw(%I: memref<10xf32>, %val: f32, %i : index) {
- %x = atomic_rmw "addf" %val, %I[%i] : (f32, memref<10xf32>) -> f32
- // CHECK: atomic_rmw "addf" [[VAL]], [[BUF]]{{\[}}[[I]]]
+ %x = atomic_rmw addf %val, %I[%i] : (f32, memref<10xf32>) -> f32
+ // CHECK: atomic_rmw addf [[VAL]], [[BUF]]{{\[}}[[I]]]
return
}
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index ff39611eaea1..45ebfff34d57 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -196,7 +196,7 @@ func @func_with_ops(i32) {
// Comparison are defined for arguments of the same type.
func @func_with_ops(i32, i64) {
^bb0(%a : i32, %b : i64): // expected-note {{prior use here}}
- %r = cmpi "eq", %a, %b : i32 // expected-error {{use of value '%b' expects different type than prior uses}}
+ %r = cmpi eq, %a, %b : i32 // expected-error {{use of value '%b' expects different type than prior uses}}
}
// -----
@@ -204,7 +204,7 @@ func @func_with_ops(i32, i64) {
// Comparisons must have the "predicate" attribute.
func @func_with_ops(i32, i32) {
^bb0(%a : i32, %b : i32):
- %r = cmpi %a, %b : i32 // expected-error {{expected non-function type}}
+ %r = cmpi %a, %b : i32 // expected-error {{expected string or keyword containing one of the following enum values}}
}
// -----
@@ -212,7 +212,7 @@ func @func_with_ops(i32, i32) {
// Integer comparisons are not recognized for float types.
func @func_with_ops(f32, f32) {
^bb0(%a : f32, %b : f32):
- %r = cmpi "eq", %a, %b : f32 // expected-error {{'lhs' must be signless-integer-like, but got 'f32'}}
+ %r = cmpi eq, %a, %b : f32 // expected-error {{'lhs' must be signless-integer-like, but got 'f32'}}
}
// -----
@@ -285,7 +285,7 @@ func @func_with_ops(tensor<12xi1>, tensor<42xi32>, tensor<42xi32>) {
func @invalid_cmp_shape(%idx : () -> ()) {
// expected-error at +1 {{'lhs' must be signless-integer-like, but got '() -> ()'}}
- %cmp = cmpi "eq", %idx, %idx : () -> ()
+ %cmp = cmpi eq, %idx, %idx : () -> ()
// -----
@@ -446,7 +446,7 @@ func @dma_wait_wrong_num_elements_type(%tag : memref<2xi32>, %idx: index, %flt:
// -----
func @invalid_cmp_attr(%idx : i32) {
- // expected-error at +1 {{invalid kind of attribute specified}}
+ // expected-error at +1 {{expected string or keyword containing one of the following enum values}}
%cmp = cmpi i1, %idx, %idx : i32
// -----
@@ -459,22 +459,22 @@ func @cmpf_generic_invalid_predicate_value(%a : f32) {
// -----
func @cmpf_canonical_invalid_predicate_value(%a : f32) {
- // expected-error at +1 {{invalid predicate attribute specification: "foo"}}
- %r = cmpf "foo", %a, %a : f32
+ // expected-error at +1 {{expected string or keyword containing one of the following enum values}}
+ %r = cmpf foo, %a, %a : f32
}
// -----
func @cmpf_canonical_invalid_predicate_value_signed(%a : f32) {
- // expected-error at +1 {{invalid predicate attribute specification: "sge"}}
- %r = cmpf "sge", %a, %a : f32
+ // expected-error at +1 {{expected string or keyword containing one of the following enum values}}
+ %r = cmpf sge, %a, %a : f32
}
// -----
func @cmpf_canonical_invalid_predicate_value_no_order(%a : f32) {
- // expected-error at +1 {{invalid predicate attribute specification: "eq"}}
- %r = cmpf "eq", %a, %a : f32
+ // expected-error at +1 {{expected string or keyword containing one of the following enum values}}
+ %r = cmpf eq, %a, %a : f32
}
// -----
@@ -493,7 +493,7 @@ func @cmpf_generic_no_predicate_attr(%a : f32, %b : f32) {
// -----
func @cmpf_wrong_type(%a : i32, %b : i32) {
- %r = cmpf "oeq", %a, %b : i32 // expected-error {{must be floating-point-like}}
+ %r = cmpf oeq, %a, %b : i32 // expected-error {{must be floating-point-like}}
}
// -----
@@ -506,7 +506,7 @@ func @cmpf_generic_wrong_result_type(%a : f32, %b : f32) {
// -----
func @cmpf_canonical_wrong_result_type(%a : f32, %b : f32) -> f32 {
- %r = cmpf "oeq", %a, %b : f32 // expected-note {{prior use here}}
+ %r = cmpf oeq, %a, %b : f32 // expected-note {{prior use here}}
// expected-error at +1 {{use of value '%r' expects different type than prior uses}}
return %r : f32
}
@@ -536,7 +536,7 @@ func @cmpf_generic_operand_type_mismatch(%a : f32, %b : f64) {
func @cmpf_canonical_type_mismatch(%a : f32, %b : f64) { // expected-note {{prior use here}}
// expected-error at +1 {{use of value '%b' expects different type than prior uses}}
- %r = cmpf "oeq", %a, %b : f32
+ %r = cmpf oeq, %a, %b : f32
}
// -----
@@ -1083,7 +1083,7 @@ func @invalid_memref_cast() {
func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f32) {
// expected-error at +1 {{expects the number of subscripts to be equal to memref rank}}
- %x = atomic_rmw "addf" %val, %I[%i] : (f32, memref<16x10xf32>) -> f32
+ %x = atomic_rmw addf %val, %I[%i] : (f32, memref<16x10xf32>) -> f32
return
}
@@ -1091,7 +1091,7 @@ func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f3
func @atomic_rmw_expects_float(%I: memref<16x10xi32>, %i : index, %val : i32) {
// expected-error at +1 {{expects a floating-point type}}
- %x = atomic_rmw "addf" %val, %I[%i, %i] : (i32, memref<16x10xi32>) -> i32
+ %x = atomic_rmw addf %val, %I[%i, %i] : (i32, memref<16x10xi32>) -> i32
return
}
@@ -1099,7 +1099,7 @@ func @atomic_rmw_expects_float(%I: memref<16x10xi32>, %i : index, %val : i32) {
func @atomic_rmw_expects_int(%I: memref<16x10xf32>, %i : index, %val : f32) {
// expected-error at +1 {{expects an integer type}}
- %x = atomic_rmw "addi" %val, %I[%i, %i] : (f32, memref<16x10xf32>) -> f32
+ %x = atomic_rmw addi %val, %I[%i, %i] : (f32, memref<16x10xf32>) -> f32
return
}
@@ -1259,4 +1259,4 @@ func @no_zero_bit_integer_attrs() {
// expected-error @+1 {{integer constant out of range for attribute}}
%x = "some.op"(){value = 0 : i0} : () -> f32
return
-}
\ No newline at end of file
+}
diff --git a/mlir/test/IR/invalid.mlir b/mlir/test/IR/invalid.mlir
index 4e6c950d637a..bb9c6a552385 100644
--- a/mlir/test/IR/invalid.mlir
+++ b/mlir/test/IR/invalid.mlir
@@ -811,7 +811,7 @@ func @mixed_named_arguments(f32,
// `tensor` as operator rather than as a type.
func @f(f32) {
^bb0(%a : f32):
- %18 = cmpi "slt", %idx, %idx : index
+ %18 = cmpi slt, %idx, %idx : index
tensor<42 x index // expected-error {{custom op 'tensor' is unknown}}
return
}
diff --git a/mlir/test/Target/SPIRV/barrier-ops.mlir b/mlir/test/Target/SPIRV/barrier-ops.mlir
index 4c5735d86876..a3b80e06a35e 100644
--- a/mlir/test/Target/SPIRV/barrier-ops.mlir
+++ b/mlir/test/Target/SPIRV/barrier-ops.mlir
@@ -2,23 +2,23 @@
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], []> {
spv.func @memory_barrier_0() -> () "None" {
- // CHECK: spv.MemoryBarrier "Device", "Release|UniformMemory"
- spv.MemoryBarrier "Device", "Release|UniformMemory"
+ // CHECK: spv.MemoryBarrier Device, "Release|UniformMemory"
+ spv.MemoryBarrier Device, "Release|UniformMemory"
spv.Return
}
spv.func @memory_barrier_1() -> () "None" {
- // CHECK: spv.MemoryBarrier "Subgroup", "AcquireRelease|SubgroupMemory"
- spv.MemoryBarrier "Subgroup", "AcquireRelease|SubgroupMemory"
+ // CHECK: spv.MemoryBarrier Subgroup, "AcquireRelease|SubgroupMemory"
+ spv.MemoryBarrier Subgroup, "AcquireRelease|SubgroupMemory"
spv.Return
}
spv.func @control_barrier_0() -> () "None" {
- // CHECK: spv.ControlBarrier "Device", "Workgroup", "Release|UniformMemory"
- spv.ControlBarrier "Device", "Workgroup", "Release|UniformMemory"
+ // CHECK: spv.ControlBarrier Device, Workgroup, "Release|UniformMemory"
+ spv.ControlBarrier Device, Workgroup, "Release|UniformMemory"
spv.Return
}
spv.func @control_barrier_1() -> () "None" {
- // CHECK: spv.ControlBarrier "Workgroup", "Invocation", "AcquireRelease|UniformMemory"
- spv.ControlBarrier "Workgroup", "Invocation", "AcquireRelease|UniformMemory"
+ // CHECK: spv.ControlBarrier Workgroup, Invocation, "AcquireRelease|UniformMemory"
+ spv.ControlBarrier Workgroup, Invocation, "AcquireRelease|UniformMemory"
spv.Return
}
}
diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir
index b3aaf63856a5..6442e00492e0 100644
--- a/mlir/test/Target/SPIRV/group-ops.mlir
+++ b/mlir/test/Target/SPIRV/group-ops.mlir
@@ -9,14 +9,14 @@ spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], []> {
}
// CHECK-LABEL: @group_broadcast_1
spv.func @group_broadcast_1(%value: f32, %localid: i32 ) -> f32 "None" {
- // CHECK: spv.GroupBroadcast "Workgroup" %{{.*}}, %{{.*}} : f32, i32
- %0 = spv.GroupBroadcast "Workgroup" %value, %localid : f32, i32
+ // CHECK: spv.GroupBroadcast Workgroup %{{.*}}, %{{.*}} : f32, i32
+ %0 = spv.GroupBroadcast Workgroup %value, %localid : f32, i32
spv.ReturnValue %0: f32
}
// CHECK-LABEL: @group_broadcast_2
spv.func @group_broadcast_2(%value: f32, %localid: vector<3xi32> ) -> f32 "None" {
- // CHECK: spv.GroupBroadcast "Workgroup" %{{.*}}, %{{.*}} : f32, vector<3xi32>
- %0 = spv.GroupBroadcast "Workgroup" %value, %localid : f32, vector<3xi32>
+ // CHECK: spv.GroupBroadcast Workgroup %{{.*}}, %{{.*}} : f32, vector<3xi32>
+ %0 = spv.GroupBroadcast Workgroup %value, %localid : f32, vector<3xi32>
spv.ReturnValue %0: f32
}
// CHECK-LABEL: @subgroup_block_read_intel
diff --git a/mlir/test/Target/SPIRV/non-uniform-ops.mlir b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
index f7b8f6cfc185..5cb035f02282 100644
--- a/mlir/test/Target/SPIRV/non-uniform-ops.mlir
+++ b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
@@ -3,23 +3,23 @@
spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], []> {
// CHECK-LABEL: @group_non_uniform_ballot
spv.func @group_non_uniform_ballot(%predicate: i1) -> vector<4xi32> "None" {
- // CHECK: %{{.*}} = spv.GroupNonUniformBallot "Workgroup" %{{.*}}: vector<4xi32>
- %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32>
+ // CHECK: %{{.*}} = spv.GroupNonUniformBallot Workgroup %{{.*}}: vector<4xi32>
+ %0 = spv.GroupNonUniformBallot Workgroup %predicate : vector<4xi32>
spv.ReturnValue %0: vector<4xi32>
}
// CHECK-LABEL: @group_non_uniform_broadcast
spv.func @group_non_uniform_broadcast(%value: f32) -> f32 "None" {
%one = spv.constant 1 : i32
- // CHECK: spv.GroupNonUniformBroadcast "Subgroup" %{{.*}}, %{{.*}} : f32, i32
- %0 = spv.GroupNonUniformBroadcast "Subgroup" %value, %one : f32, i32
+ // CHECK: spv.GroupNonUniformBroadcast Subgroup %{{.*}}, %{{.*}} : f32, i32
+ %0 = spv.GroupNonUniformBroadcast Subgroup %value, %one : f32, i32
spv.ReturnValue %0: f32
}
// CHECK-LABEL: @group_non_uniform_elect
spv.func @group_non_uniform_elect() -> i1 "None" {
- // CHECK: %{{.+}} = spv.GroupNonUniformElect "Workgroup" : i1
- %0 = spv.GroupNonUniformElect "Workgroup" : i1
+ // CHECK: %{{.+}} = spv.GroupNonUniformElect Workgroup : i1
+ %0 = spv.GroupNonUniformElect Workgroup : i1
spv.ReturnValue %0: i1
}
diff --git a/mlir/test/Transforms/buffer-deallocation.mlir b/mlir/test/Transforms/buffer-deallocation.mlir
index f0eccbd2ae82..f61d501dff9c 100644
--- a/mlir/test/Transforms/buffer-deallocation.mlir
+++ b/mlir/test/Transforms/buffer-deallocation.mlir
@@ -598,7 +598,7 @@ func @memref_in_function_results(
func @nested_region_control_flow(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -628,7 +628,7 @@ func @nested_region_control_flow(
func @nested_region_control_flow_div(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -844,7 +844,7 @@ func @nestedRegionsAndCondBranchAlloca(
func @nestedRegionControlFlowAlloca(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -878,7 +878,7 @@ func @loop_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = alloc() : memref<2xf32>
scf.yield %3 : memref<2xf32>
}
@@ -921,7 +921,7 @@ func @loop_nested_if_no_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
scf.yield %0 : memref<2xf32>
} else {
@@ -961,7 +961,7 @@ func @loop_nested_if_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
%4 = alloc() : memref<2xf32>
scf.yield %4 : memref<2xf32>
@@ -1021,7 +1021,7 @@ func @loop_nested_alloc(
%3 = scf.for %i3 = %lb to %ub step %step
iter_args(%iterBuf3 = %iterBuf2) -> memref<2xf32> {
%4 = alloc() : memref<2xf32>
- %5 = cmpi "eq", %i, %ub : index
+ %5 = cmpi eq, %i, %ub : index
%6 = scf.if %5 -> (memref<2xf32>) {
%7 = alloc() : memref<2xf32>
scf.yield %7 : memref<2xf32>
@@ -1104,7 +1104,7 @@ func @loop_dynalloc(
br ^loopHeader(%const0, %arg2 : i32, memref<?xf32>)
^loopHeader(%i : i32, %buff : memref<?xf32>):
- %lessThan = cmpi "slt", %i, %arg1 : i32
+ %lessThan = cmpi slt, %i, %arg1 : i32
cond_br %lessThan,
^loopBody(%i, %buff : i32, memref<?xf32>),
^exit(%buff : memref<?xf32>)
@@ -1145,7 +1145,7 @@ func @do_loop_alloc(
br ^loopHeader(%inc, %alloc1 : i32, memref<2xf32>)
^loopHeader(%i : i32, %buff : memref<2xf32>):
- %lessThan = cmpi "slt", %i, %arg1 : i32
+ %lessThan = cmpi slt, %i, %arg1 : i32
cond_br %lessThan,
^loopBody(%i, %buff : i32, memref<2xf32>),
^exit(%buff : memref<2xf32>)
diff --git a/mlir/test/Transforms/buffer-hoisting.mlir b/mlir/test/Transforms/buffer-hoisting.mlir
index 2112e5cf9dc3..7598dafd6643 100644
--- a/mlir/test/Transforms/buffer-hoisting.mlir
+++ b/mlir/test/Transforms/buffer-hoisting.mlir
@@ -384,7 +384,7 @@ func @nested_regions_and_cond_branch(
func @nested_region_control_flow(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -410,7 +410,7 @@ func @nested_region_control_flow(
func @nested_region_control_flow_div(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -436,7 +436,7 @@ func @nested_region_control_flow_div(
func @nested_region_control_flow_div_nested(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
%3 = scf.if %0 -> (memref<?x?xf32>) {
@@ -621,7 +621,7 @@ func @loop_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = alloc() : memref<2xf32>
scf.yield %3 : memref<2xf32>
}
@@ -648,7 +648,7 @@ func @loop_nested_if_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
%4 = alloc() : memref<2xf32>
scf.yield %4 : memref<2xf32>
@@ -685,7 +685,7 @@ func @loop_nested_alloc(
%3 = scf.for %i3 = %lb to %ub step %step
iter_args(%iterBuf3 = %iterBuf2) -> memref<2xf32> {
%4 = alloc() : memref<2xf32>
- %5 = cmpi "eq", %i, %ub : index
+ %5 = cmpi eq, %i, %ub : index
%6 = scf.if %5 -> (memref<2xf32>) {
%7 = alloc() : memref<2xf32>
scf.yield %7 : memref<2xf32>
@@ -726,7 +726,7 @@ func @loop_nested_alloc_dyn_dependency(
iter_args(%iterBuf2 = %iterBuf) -> memref<?xf32> {
%3 = scf.for %i3 = %lb to %ub step %step
iter_args(%iterBuf3 = %iterBuf2) -> memref<?xf32> {
- %5 = cmpi "eq", %i, %ub : index
+ %5 = cmpi eq, %i, %ub : index
%6 = scf.if %5 -> (memref<?xf32>) {
%7 = alloc(%i3) : memref<?xf32>
scf.yield %7 : memref<?xf32>
diff --git a/mlir/test/Transforms/buffer-loop-hoisting.mlir b/mlir/test/Transforms/buffer-loop-hoisting.mlir
index 4b4313f9978e..9708f2962e54 100644
--- a/mlir/test/Transforms/buffer-loop-hoisting.mlir
+++ b/mlir/test/Transforms/buffer-loop-hoisting.mlir
@@ -110,7 +110,7 @@ func @nested_regions_and_cond_branch(
func @nested_region_control_flow(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -141,7 +141,7 @@ func @loop_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = alloc() : memref<2xf32>
scf.yield %3 : memref<2xf32>
}
@@ -168,7 +168,7 @@ func @loop_nested_if_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
%4 = alloc() : memref<2xf32>
scf.yield %4 : memref<2xf32>
@@ -206,7 +206,7 @@ func @loop_nested_alloc(
%3 = scf.for %i3 = %lb to %ub step %step
iter_args(%iterBuf3 = %iterBuf2) -> memref<2xf32> {
%4 = alloc() : memref<2xf32>
- %5 = cmpi "eq", %i, %ub : index
+ %5 = cmpi eq, %i, %ub : index
%6 = scf.if %5 -> (memref<2xf32>) {
%7 = alloc() : memref<2xf32>
%8 = alloc() : memref<2xf32>
@@ -253,7 +253,7 @@ func @loop_nested_alloc_dyn_dependency(
%3 = scf.for %i3 = %lb to %ub step %step
iter_args(%iterBuf3 = %iterBuf2) -> memref<?xf32> {
%4 = alloc(%i3) : memref<?xf32>
- %5 = cmpi "eq", %i, %ub : index
+ %5 = cmpi eq, %i, %ub : index
%6 = scf.if %5 -> (memref<?xf32>) {
%7 = alloc(%i3) : memref<?xf32>
scf.yield %7 : memref<?xf32>
@@ -360,7 +360,7 @@ func @no_hoist_one_loop_conditional(
%res: memref<2xf32>) {
%0 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %1 = cmpi "eq", %i, %ub : index
+ %1 = cmpi eq, %i, %ub : index
%2 = scf.if %1 -> (memref<2xf32>) {
%3 = alloc() : memref<2xf32>
scf.yield %3 : memref<2xf32>
@@ -387,7 +387,7 @@ func @hoist_one_loop_conditional(
%buf: memref<2xf32>,
%res: memref<2xf32>) {
%0 = alloc() : memref<2xf32>
- %1 = cmpi "eq", %lb, %ub : index
+ %1 = cmpi eq, %lb, %ub : index
%2 = scf.if %1 -> (memref<2xf32>) {
%3 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir
index 0721835a86c6..98d274a63fc2 100644
--- a/mlir/test/Transforms/canonicalize-block-merge.mlir
+++ b/mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -231,7 +231,7 @@ func private @print(%arg0: i32, %arg1: i32)
// CHECK-LABEL: @nomerge
func @nomerge(%arg0: i32, %i: i32) {
%c1_i32 = constant 1 : i32
- %icmp = cmpi "slt", %i, %arg0 : i32
+ %icmp = cmpi slt, %i, %arg0 : i32
cond_br %icmp, ^bb2, ^bb3
^bb2: // pred: ^bb1
@@ -243,7 +243,7 @@ func @nomerge(%arg0: i32, %i: i32) {
br ^bb4(%jp1 : i32)
^bb4(%j: i32): // 2 preds: ^bb2, ^bb7
- %jcmp = cmpi "slt", %j, %arg0 : i32
+ %jcmp = cmpi slt, %j, %arg0 : i32
// CHECK-NOT: call @print(%[[arg1:.+]], %[[arg1]])
call @print(%j, %ip1) : (i32, i32) -> ()
cond_br %jcmp, ^bb7, ^bb3
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index f2296161ed7a..e3e4b56fd072 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -577,7 +577,7 @@ func @lowered_affine_mod() -> (index, index) {
%c42 = constant 42 : index
%0 = remi_signed %c-43, %c42 : index
%c0 = constant 0 : index
- %1 = cmpi "slt", %0, %c0 : index
+ %1 = cmpi slt, %0, %c0 : index
%2 = addi %0, %c42 : index
%3 = select %1, %2, %0 : index
// CHECK-NEXT: {{.*}} = constant 1 : index
@@ -585,7 +585,7 @@ func @lowered_affine_mod() -> (index, index) {
%c42_0 = constant 42 : index
%4 = remi_signed %c43, %c42_0 : index
%c0_1 = constant 0 : index
- %5 = cmpi "slt", %4, %c0_1 : index
+ %5 = cmpi slt, %4, %c0_1 : index
%6 = addi %4, %c42_0 : index
%7 = select %5, %6, %4 : index
return %3, %7 : index, index
@@ -603,7 +603,7 @@ func @lowered_affine_floordiv() -> (index, index) {
%c42 = constant 42 : index
%c0 = constant 0 : index
%c-1 = constant -1 : index
- %0 = cmpi "slt", %c-43, %c0 : index
+ %0 = cmpi slt, %c-43, %c0 : index
%1 = subi %c-1, %c-43 : index
%2 = select %0, %1, %c-43 : index
%3 = divi_signed %2, %c42 : index
@@ -614,7 +614,7 @@ func @lowered_affine_floordiv() -> (index, index) {
%c42_0 = constant 42 : index
%c0_1 = constant 0 : index
%c-1_2 = constant -1 : index
- %6 = cmpi "slt", %c43, %c0_1 : index
+ %6 = cmpi slt, %c43, %c0_1 : index
%7 = subi %c-1_2, %c43 : index
%8 = select %6, %7, %c43 : index
%9 = divi_signed %8, %c42_0 : index
@@ -635,7 +635,7 @@ func @lowered_affine_ceildiv() -> (index, index) {
%c42 = constant 42 : index
%c0 = constant 0 : index
%c1 = constant 1 : index
- %0 = cmpi "sle", %c-43, %c0 : index
+ %0 = cmpi sle, %c-43, %c0 : index
%1 = subi %c0, %c-43 : index
%2 = subi %c-43, %c1 : index
%3 = select %0, %1, %2 : index
@@ -648,7 +648,7 @@ func @lowered_affine_ceildiv() -> (index, index) {
%c42_0 = constant 42 : index
%c0_1 = constant 0 : index
%c1_2 = constant 1 : index
- %8 = cmpi "sle", %c43, %c0_1 : index
+ %8 = cmpi sle, %c43, %c0_1 : index
%9 = subi %c0_1, %c43 : index
%10 = subi %c43, %c1_2 : index
%11 = select %8, %9, %10 : index
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index 31e58bf5c577..74dea976ab1f 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -563,25 +563,25 @@ func @cmpi() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1) {
// CHECK-DAG: [[F:%.+]] = constant false
// CHECK-DAG: [[T:%.+]] = constant true
// CHECK-NEXT: return [[F]],
- %0 = cmpi "eq", %c42, %cm1 : i32
+ %0 = cmpi eq, %c42, %cm1 : i32
// CHECK-SAME: [[T]],
- %1 = cmpi "ne", %c42, %cm1 : i32
+ %1 = cmpi ne, %c42, %cm1 : i32
// CHECK-SAME: [[F]],
- %2 = cmpi "slt", %c42, %cm1 : i32
+ %2 = cmpi slt, %c42, %cm1 : i32
// CHECK-SAME: [[F]],
- %3 = cmpi "sle", %c42, %cm1 : i32
+ %3 = cmpi sle, %c42, %cm1 : i32
// CHECK-SAME: [[T]],
- %4 = cmpi "sgt", %c42, %cm1 : i32
+ %4 = cmpi sgt, %c42, %cm1 : i32
// CHECK-SAME: [[T]],
- %5 = cmpi "sge", %c42, %cm1 : i32
+ %5 = cmpi sge, %c42, %cm1 : i32
// CHECK-SAME: [[T]],
- %6 = cmpi "ult", %c42, %cm1 : i32
+ %6 = cmpi ult, %c42, %cm1 : i32
// CHECK-SAME: [[T]],
- %7 = cmpi "ule", %c42, %cm1 : i32
+ %7 = cmpi ule, %c42, %cm1 : i32
// CHECK-SAME: [[F]],
- %8 = cmpi "ugt", %c42, %cm1 : i32
+ %8 = cmpi ugt, %c42, %cm1 : i32
// CHECK-SAME: [[F]]
- %9 = cmpi "uge", %c42, %cm1 : i32
+ %9 = cmpi uge, %c42, %cm1 : i32
return %0, %1, %2, %3, %4, %5, %6, %7, %8, %9 : i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
}
@@ -594,37 +594,37 @@ func @cmpf_normal_numbers() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1,
// CHECK-DAG: [[F:%.+]] = constant false
// CHECK-DAG: [[T:%.+]] = constant true
// CHECK-NEXT: return [[F]],
- %0 = cmpf "false", %c42, %cm1 : f32
+ %0 = cmpf false, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %1 = cmpf "oeq", %c42, %cm1 : f32
+ %1 = cmpf oeq, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %2 = cmpf "ogt", %c42, %cm1 : f32
+ %2 = cmpf ogt, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %3 = cmpf "oge", %c42, %cm1 : f32
+ %3 = cmpf oge, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %4 = cmpf "olt", %c42, %cm1 : f32
+ %4 = cmpf olt, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %5 = cmpf "ole", %c42, %cm1 : f32
+ %5 = cmpf ole, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %6 = cmpf "one", %c42, %cm1 : f32
+ %6 = cmpf one, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %7 = cmpf "ord", %c42, %cm1 : f32
+ %7 = cmpf ord, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %8 = cmpf "ueq", %c42, %cm1 : f32
+ %8 = cmpf ueq, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %9 = cmpf "ugt", %c42, %cm1 : f32
+ %9 = cmpf ugt, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %10 = cmpf "uge", %c42, %cm1 : f32
+ %10 = cmpf uge, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %11 = cmpf "ult", %c42, %cm1 : f32
+ %11 = cmpf ult, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %12 = cmpf "ule", %c42, %cm1 : f32
+ %12 = cmpf ule, %c42, %cm1 : f32
// CHECK-SAME: [[T]],
- %13 = cmpf "une", %c42, %cm1 : f32
+ %13 = cmpf une, %c42, %cm1 : f32
// CHECK-SAME: [[F]],
- %14 = cmpf "uno", %c42, %cm1 : f32
+ %14 = cmpf uno, %c42, %cm1 : f32
// CHECK-SAME: [[T]]
- %15 = cmpf "true", %c42, %cm1 : f32
+ %15 = cmpf true, %c42, %cm1 : f32
return %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15 : i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
}
@@ -637,37 +637,37 @@ func @cmpf_nan() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1,
// CHECK-DAG: [[F:%.+]] = constant false
// CHECK-DAG: [[T:%.+]] = constant true
// CHECK-NEXT: return [[F]],
- %0 = cmpf "false", %c42, %cqnan : f32
+ %0 = cmpf false, %c42, %cqnan : f32
// CHECK-SAME: [[F]]
- %1 = cmpf "oeq", %c42, %cqnan : f32
+ %1 = cmpf oeq, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %2 = cmpf "ogt", %c42, %cqnan : f32
+ %2 = cmpf ogt, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %3 = cmpf "oge", %c42, %cqnan : f32
+ %3 = cmpf oge, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %4 = cmpf "olt", %c42, %cqnan : f32
+ %4 = cmpf olt, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %5 = cmpf "ole", %c42, %cqnan : f32
+ %5 = cmpf ole, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %6 = cmpf "one", %c42, %cqnan : f32
+ %6 = cmpf one, %c42, %cqnan : f32
// CHECK-SAME: [[F]],
- %7 = cmpf "ord", %c42, %cqnan : f32
+ %7 = cmpf ord, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %8 = cmpf "ueq", %c42, %cqnan : f32
+ %8 = cmpf ueq, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %9 = cmpf "ugt", %c42, %cqnan : f32
+ %9 = cmpf ugt, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %10 = cmpf "uge", %c42, %cqnan : f32
+ %10 = cmpf uge, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %11 = cmpf "ult", %c42, %cqnan : f32
+ %11 = cmpf ult, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %12 = cmpf "ule", %c42, %cqnan : f32
+ %12 = cmpf ule, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %13 = cmpf "une", %c42, %cqnan : f32
+ %13 = cmpf une, %c42, %cqnan : f32
// CHECK-SAME: [[T]],
- %14 = cmpf "uno", %c42, %cqnan : f32
+ %14 = cmpf uno, %c42, %cqnan : f32
// CHECK-SAME: [[T]]
- %15 = cmpf "true", %c42, %cqnan : f32
+ %15 = cmpf true, %c42, %cqnan : f32
return %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15 : i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
}
@@ -680,37 +680,37 @@ func @cmpf_inf() -> (i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1,
// CHECK-DAG: [[F:%.+]] = constant false
// CHECK-DAG: [[T:%.+]] = constant true
// CHECK-NEXT: return [[F]],
- %0 = cmpf "false", %c42, %cpinf: f32
+ %0 = cmpf false, %c42, %cpinf: f32
// CHECK-SAME: [[F]]
- %1 = cmpf "oeq", %c42, %cpinf: f32
+ %1 = cmpf oeq, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %2 = cmpf "ogt", %c42, %cpinf: f32
+ %2 = cmpf ogt, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %3 = cmpf "oge", %c42, %cpinf: f32
+ %3 = cmpf oge, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %4 = cmpf "olt", %c42, %cpinf: f32
+ %4 = cmpf olt, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %5 = cmpf "ole", %c42, %cpinf: f32
+ %5 = cmpf ole, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %6 = cmpf "one", %c42, %cpinf: f32
+ %6 = cmpf one, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %7 = cmpf "ord", %c42, %cpinf: f32
+ %7 = cmpf ord, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %8 = cmpf "ueq", %c42, %cpinf: f32
+ %8 = cmpf ueq, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %9 = cmpf "ugt", %c42, %cpinf: f32
+ %9 = cmpf ugt, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %10 = cmpf "uge", %c42, %cpinf: f32
+ %10 = cmpf uge, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %11 = cmpf "ult", %c42, %cpinf: f32
+ %11 = cmpf ult, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %12 = cmpf "ule", %c42, %cpinf: f32
+ %12 = cmpf ule, %c42, %cpinf: f32
// CHECK-SAME: [[T]],
- %13 = cmpf "une", %c42, %cpinf: f32
+ %13 = cmpf une, %c42, %cpinf: f32
// CHECK-SAME: [[F]],
- %14 = cmpf "uno", %c42, %cpinf: f32
+ %14 = cmpf uno, %c42, %cpinf: f32
// CHECK-SAME: [[T]]
- %15 = cmpf "true", %c42, %cpinf: f32
+ %15 = cmpf true, %c42, %cpinf: f32
return %0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15 : i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1, i1
}
diff --git a/mlir/test/Transforms/copy-removal.mlir b/mlir/test/Transforms/copy-removal.mlir
index ebd737b32aca..bb52c0c22029 100644
--- a/mlir/test/Transforms/copy-removal.mlir
+++ b/mlir/test/Transforms/copy-removal.mlir
@@ -6,7 +6,7 @@
// CHECK-LABEL: func @nested_region_control_flow_div_nested
func @nested_region_control_flow_div_nested(%arg0: index, %arg1: index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
// CHECK: %{{.*}} = scf.if
%2 = scf.if %0 -> (memref<?x?xf32>) {
@@ -311,7 +311,7 @@ func @loop_alloc(%arg0: index, %arg1: index, %arg2: index, %arg3: memref<2xf32>,
// CHECK: linalg.copy
linalg.copy(%arg3, %1) : memref<2xf32>, memref<2xf32>
%2 = scf.for %arg5 = %arg0 to %arg1 step %arg2 iter_args(%arg6 = %1) -> (memref<2xf32>) {
- %3 = cmpi "eq", %arg5, %arg1 : index
+ %3 = cmpi eq, %arg5, %arg1 : index
// CHECK: dealloc
dealloc %arg6 : memref<2xf32>
// CHECK: %[[PERCENT4:.*]] = alloc()
@@ -347,7 +347,7 @@ func @check_with_affine_dialect(%arg0: memref<4xf32>, %arg1: memref<4xf32>, %arg
affine.for %arg3 = 0 to 4 {
%5 = affine.load %arg0[%arg3] : memref<4xf32>
%6 = affine.load %arg1[%arg3] : memref<4xf32>
- %7 = cmpf "ogt", %5, %6 : f32
+ %7 = cmpf ogt, %5, %6 : f32
// CHECK: %[[SELECT_RES:.*]] = select
%8 = select %7, %5, %6 : f32
// CHECK-NEXT: affine.store %[[SELECT_RES]], %[[RES]]
diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir
index 4ee10ef62295..1c9b7650ac97 100644
--- a/mlir/test/Transforms/cse.mlir
+++ b/mlir/test/Transforms/cse.mlir
@@ -81,12 +81,12 @@ func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4x?xf3
// CHECK-LABEL: @different_attributes
func @different_attributes(index, index) -> (i1, i1, i1) {
^bb0(%a : index, %b : index):
- // CHECK: %0 = cmpi "slt", %arg0, %arg1 : index
- %0 = cmpi "slt", %a, %b : index
+ // CHECK: %0 = cmpi slt, %arg0, %arg1 : index
+ %0 = cmpi slt, %a, %b : index
- // CHECK-NEXT: %1 = cmpi "ne", %arg0, %arg1 : index
+ // CHECK-NEXT: %1 = cmpi ne, %arg0, %arg1 : index
/// Predicate 1 means inequality comparison.
- %1 = cmpi "ne", %a, %b : index
+ %1 = cmpi ne, %a, %b : index
%2 = "std.cmpi"(%a, %b) {predicate = 1} : (index, index) -> i1
// CHECK-NEXT: return %0, %1, %1 : i1, i1, i1
diff --git a/mlir/test/Transforms/parametric-tiling.mlir b/mlir/test/Transforms/parametric-tiling.mlir
index 13ea6a969e4f..1022a740ee22 100644
--- a/mlir/test/Transforms/parametric-tiling.mlir
+++ b/mlir/test/Transforms/parametric-tiling.mlir
@@ -40,11 +40,11 @@ func @rectangular(%arg0: memref<?x?xf32>) {
scf.for %i = %c2 to %c44 step %c1 {
// Upper bound for the inner loop min(%i + %step, %c44).
// COMMON: %[[stepped:.*]] = addi %[[i]], %[[step]]
- // COMMON-NEXT: cmpi "slt", %c44, %[[stepped]]
+ // COMMON-NEXT: cmpi slt, %c44, %[[stepped]]
// COMMON-NEXT: %[[ub:.*]] = select {{.*}}, %c44, %[[stepped]]
//
// TILE_74: %[[stepped2:.*]] = addi %[[j]], %[[step2]]
- // TILE_74-NEXT: cmpi "slt", %c44, %[[stepped2]]
+ // TILE_74-NEXT: cmpi slt, %c44, %[[stepped2]]
// TILE_74-NEXT: %[[ub2:.*]] = select {{.*}}, %c44, %[[stepped2]]
// Created inner scf.
@@ -108,10 +108,10 @@ func @triangular(%arg0: memref<?x?xf32>) {
scf.for %i = %c2 to %c44 step %c1 {
// Upper bound for the inner loop min(%i + %step, %c44).
// COMMON: %[[stepped:.*]] = addi %[[i]], %[[step]]
- // COMMON-NEXT: cmpi "slt", %c44, %[[stepped]]
+ // COMMON-NEXT: cmpi slt, %c44, %[[stepped]]
// COMMON-NEXT: %[[ub:.*]] = select {{.*}}, %c44, %[[stepped]]
// TILE_74: %[[stepped2:.*]] = addi %[[j]], %[[step2]]
- // TILE_74-NEXT: cmpi "slt", %[[i]], %[[stepped2]]
+ // TILE_74-NEXT: cmpi slt, %[[i]], %[[stepped2]]
// TILE_74-NEXT: %[[ub2:.*]] = select {{.*}}, %[[i]], %[[stepped2]]
//
// Created inner scf.
diff --git a/mlir/test/Transforms/promote-buffers-to-stack.mlir b/mlir/test/Transforms/promote-buffers-to-stack.mlir
index 1d9806e53036..b9443cbbd860 100644
--- a/mlir/test/Transforms/promote-buffers-to-stack.mlir
+++ b/mlir/test/Transforms/promote-buffers-to-stack.mlir
@@ -417,7 +417,7 @@ func @memref_in_function_results(
func @nested_region_control_flow(
%arg0 : index,
%arg1 : index) -> memref<?x?xf32> {
- %0 = cmpi "eq", %arg0, %arg1 : index
+ %0 = cmpi eq, %arg0, %arg1 : index
%1 = alloc(%arg0, %arg0) : memref<?x?xf32>
%2 = scf.if %0 -> (memref<?x?xf32>) {
scf.yield %1 : memref<?x?xf32>
@@ -482,7 +482,7 @@ func @loop_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = alloc() : memref<2xf32>
scf.yield %3 : memref<2xf32>
}
@@ -512,7 +512,7 @@ func @loop_nested_if_no_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
scf.yield %0 : memref<2xf32>
} else {
@@ -547,7 +547,7 @@ func @loop_nested_if_alloc(
%0 = alloc() : memref<2xf32>
%1 = scf.for %i = %lb to %ub step %step
iter_args(%iterBuf = %buf) -> memref<2xf32> {
- %2 = cmpi "eq", %i, %ub : index
+ %2 = cmpi eq, %i, %ub : index
%3 = scf.if %2 -> (memref<2xf32>) {
%4 = alloc() : memref<2xf32>
scf.yield %4 : memref<2xf32>
diff --git a/mlir/test/Transforms/sccp-callgraph.mlir b/mlir/test/Transforms/sccp-callgraph.mlir
index 27ac6d5c7c26..07056a3bd376 100644
--- a/mlir/test/Transforms/sccp-callgraph.mlir
+++ b/mlir/test/Transforms/sccp-callgraph.mlir
@@ -188,7 +188,7 @@ func private @complex_inner_if(%arg0 : i32) -> i32 {
// CHECK: cond_br %[[TRUE]], ^bb1
%cst_20 = constant 20 : i32
- %cond = cmpi "ult", %arg0, %cst_20 : i32
+ %cond = cmpi ult, %arg0, %cst_20 : i32
cond_br %cond, ^bb1, ^bb2
^bb1:
diff --git a/mlir/test/Transforms/sccp-structured.mlir b/mlir/test/Transforms/sccp-structured.mlir
index 0aaa9da4507f..45227cd30b3d 100644
--- a/mlir/test/Transforms/sccp-structured.mlir
+++ b/mlir/test/Transforms/sccp-structured.mlir
@@ -118,7 +118,7 @@ func @loop_inner_control_flow(%arg0 : index, %arg1 : index, %arg2 : index) -> i3
%cst_1 = constant 1 : i32
%result = scf.for %i0 = %arg0 to %arg1 step %arg2 iter_args(%si = %cst_1) -> (i32) {
%cst_20 = constant 20 : i32
- %cond = cmpi "ult", %si, %cst_20 : i32
+ %cond = cmpi ult, %si, %cst_20 : i32
%inner_res = scf.if %cond -> (i32) {
%1 = constant 1 : i32
scf.yield %1 : i32
diff --git a/mlir/test/Transforms/sccp.mlir b/mlir/test/Transforms/sccp.mlir
index 382ea5e39921..f9317af9f912 100644
--- a/mlir/test/Transforms/sccp.mlir
+++ b/mlir/test/Transforms/sccp.mlir
@@ -137,7 +137,7 @@ func @simple_loop_inner_control_flow(%arg0 : i32) -> i32 {
// CHECK: cond_br %[[TRUE]], ^bb3, ^bb4
%cst_20 = constant 20 : i32
- %cond = cmpi "ult", %iv, %cst_20 : i32
+ %cond = cmpi ult, %iv, %cst_20 : i32
cond_br %cond, ^bb3, ^bb4
^bb3:
diff --git a/mlir/test/mlir-tblgen/op-format.mlir b/mlir/test/mlir-tblgen/op-format.mlir
index 4eb64772aee2..4ec07998ac7b 100644
--- a/mlir/test/mlir-tblgen/op-format.mlir
+++ b/mlir/test/mlir-tblgen/op-format.mlir
@@ -187,8 +187,8 @@ test.format_optional_unit_attribute
// CHECK: test.format_optional_unit_attribute_no_elide unit
test.format_optional_unit_attribute_no_elide unit
-// CHECK: test.format_optional_enum_attr "case5"
-test.format_optional_enum_attr "case5"
+// CHECK: test.format_optional_enum_attr case5
+test.format_optional_enum_attr case5
// CHECK: test.format_optional_enum_attr
// CHECK-NOT: "case5"
diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
index bba796f9b492..f07468a7049f 100644
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -35,6 +35,15 @@ static llvm::cl::opt<bool> formatErrorIsFatal(
llvm::cl::desc("Emit a fatal error if format parsing fails"),
llvm::cl::init(true));
+/// Returns true if the given string can be formatted as a keyword.
+static bool canFormatStringAsKeyword(StringRef value) {
+ if (!isalpha(value.front()) && value.front() != '_')
+ return false;
+ return llvm::all_of(value.drop_front(), [](char c) {
+ return isalnum(c) || c == '_' || c == '$' || c == '.';
+ });
+}
+
//===----------------------------------------------------------------------===//
// Element
//===----------------------------------------------------------------------===//
@@ -289,11 +298,7 @@ bool LiteralElement::isValidLiteral(StringRef value) {
return true;
// Otherwise, this must be an identifier.
- if (!isalpha(front) && front != '_')
- return false;
- return llvm::all_of(value.drop_front(), [](char c) {
- return isalnum(c) || c == '_' || c == '$' || c == '.';
- });
+ return canFormatStringAsKeyword(value);
}
//===----------------------------------------------------------------------===//
@@ -536,41 +541,32 @@ const char *const optionalSymbolNameAttrParserCode = R"(
/// {1}: The c++ namespace for the enum symbolize functions.
/// {2}: The function to symbolize a string of the enum.
/// {3}: The constant builder call to create an attribute of the enum type.
+/// {4}: The set of allowed enum keywords.
+/// {5}: The error message on failure when the enum isn't present.
const char *const enumAttrParserCode = R"(
{
- ::mlir::StringAttr attrVal;
+ ::llvm::StringRef attrStr;
::mlir::NamedAttrList attrStorage;
auto loc = parser.getCurrentLocation();
- if (parser.parseAttribute(attrVal, parser.getBuilder().getNoneType(),
- "{0}", attrStorage))
- return ::mlir::failure();
-
- auto attrOptional = {1}::{2}(attrVal.getValue());
- if (!attrOptional)
- return parser.emitError(loc, "invalid ")
- << "{0} attribute specification: " << attrVal;
-
- {0}Attr = {3};
- result.addAttribute("{0}", {0}Attr);
- }
-)";
-const char *const optionalEnumAttrParserCode = R"(
- {
- ::mlir::StringAttr attrVal;
- ::mlir::NamedAttrList attrStorage;
- auto loc = parser.getCurrentLocation();
-
- ::mlir::OptionalParseResult parseResult =
- parser.parseOptionalAttribute(attrVal, parser.getBuilder().getNoneType(),
- "{0}", attrStorage);
- if (parseResult.hasValue()) {
- if (failed(*parseResult))
- return ::mlir::failure();
-
- auto attrOptional = {1}::{2}(attrVal.getValue());
+ if (parser.parseOptionalKeyword(&attrStr, {4})) {
+ ::mlir::StringAttr attrVal;
+ ::mlir::OptionalParseResult parseResult =
+ parser.parseOptionalAttribute(attrVal,
+ parser.getBuilder().getNoneType(),
+ "{0}", attrStorage);
+ if (parseResult.hasValue()) {{
+ if (failed(*parseResult))
+ return ::mlir::failure();
+ attrStr = attrVal.getValue();
+ } else {
+ {5}
+ }
+ }
+ if (!attrStr.empty()) {
+ auto attrOptional = {1}::{2}(attrStr);
if (!attrOptional)
return parser.emitError(loc, "invalid ")
- << "{0} attribute specification: " << attrVal;
+ << "{0} attribute specification: \"" << attrStr << '"';
{0}Attr = {3};
result.addAttribute("{0}", {0}Attr);
@@ -1029,6 +1025,49 @@ static void genCustomDirectiveParser(CustomDirective *dir, OpMethodBody &body) {
body << " }\n";
}
+/// Generate the parser for a enum attribute.
+static void genEnumAttrParser(const NamedAttribute *var, OpMethodBody &body,
+ FmtContext &attrTypeCtx) {
+ Attribute baseAttr = var->attr.getBaseAttr();
+ const EnumAttr &enumAttr = cast<EnumAttr>(baseAttr);
+ std::vector<EnumAttrCase> cases = enumAttr.getAllCases();
+
+ // Generate the code for building an attribute for this enum.
+ std::string attrBuilderStr;
+ {
+ llvm::raw_string_ostream os(attrBuilderStr);
+ os << tgfmt(enumAttr.getConstBuilderTemplate(), &attrTypeCtx,
+ "attrOptional.getValue()");
+ }
+
+ // Build a string containing the cases that can be formatted as a keyword.
+ std::string validCaseKeywordsStr = "{";
+ llvm::raw_string_ostream validCaseKeywordsOS(validCaseKeywordsStr);
+ for (const EnumAttrCase &attrCase : cases)
+ if (canFormatStringAsKeyword(attrCase.getStr()))
+ validCaseKeywordsOS << '"' << attrCase.getStr() << "\",";
+ validCaseKeywordsOS.str().back() = '}';
+
+ // If the attribute is not optional, build an error message for the missing
+ // attribute.
+ std::string errorMessage;
+ if (!var->attr.isOptional()) {
+ llvm::raw_string_ostream errorMessageOS(errorMessage);
+ errorMessageOS
+ << "return parser.emitError(loc, \"expected string or "
+ "keyword containing one of the following enum values for attribute '"
+ << var->name << "' [";
+ llvm::interleaveComma(cases, errorMessageOS, [&](const auto &attrCase) {
+ errorMessageOS << attrCase.getStr();
+ });
+ errorMessageOS << "]\");";
+ }
+
+ body << formatv(enumAttrParserCode, var->name, enumAttr.getCppNamespace(),
+ enumAttr.getStringToSymbolFnName(), attrBuilderStr,
+ validCaseKeywordsStr, errorMessage);
+}
+
void OperationFormat::genParser(Operator &op, OpClass &opClass) {
llvm::SmallVector<OpMethodParameter, 4> paramList;
paramList.emplace_back("::mlir::OpAsmParser &", "parser");
@@ -1130,24 +1169,8 @@ void OperationFormat::genElementParser(Element *element, OpMethodBody &body,
const NamedAttribute *var = attr->getVar();
// Check to see if we can parse this as an enum attribute.
- if (canFormatEnumAttr(var)) {
- Attribute baseAttr = var->attr.getBaseAttr();
- const EnumAttr &enumAttr = cast<EnumAttr>(baseAttr);
-
- // Generate the code for building an attribute for this enum.
- std::string attrBuilderStr;
- {
- llvm::raw_string_ostream os(attrBuilderStr);
- os << tgfmt(enumAttr.getConstBuilderTemplate(), &attrTypeCtx,
- "attrOptional.getValue()");
- }
-
- body << formatv(var->attr.isOptional() ? optionalEnumAttrParserCode
- : enumAttrParserCode,
- var->name, enumAttr.getCppNamespace(),
- enumAttr.getStringToSymbolFnName(), attrBuilderStr);
- return;
- }
+ if (canFormatEnumAttr(var))
+ return genEnumAttrParser(var, body, attrTypeCtx);
// Check to see if we should parse this as a symbol name attribute.
if (shouldFormatSymbolNameAttr(var)) {
@@ -1497,6 +1520,17 @@ const char *regionSingleBlockImplicitTerminatorPrinterCode = R"(
}
)";
+/// The code snippet used to generate a printer call for an enum that has cases
+/// that can't be represented with a keyword.
+///
+/// {0}: The name of the enum attribute.
+/// {1}: The name of the enum attributes symbolToString function.
+const char *enumAttrBeginPrinterCode = R"(
+ {
+ auto caseValue = {0}();
+ auto caseValueStr = {1}(caseValue);
+)";
+
/// Generate the printer for the 'attr-dict' directive.
static void genAttrDictPrinter(OperationFormat &fmt, Operator &op,
OpMethodBody &body, bool withKeyword) {
@@ -1639,6 +1673,82 @@ static OpMethodBody &genTypeOperandPrinter(Element *arg, OpMethodBody &body) {
<< "().getType())";
}
+/// Generate the printer for an enum attribute.
+static void genEnumAttrPrinter(const NamedAttribute *var, OpMethodBody &body) {
+ Attribute baseAttr = var->attr.getBaseAttr();
+ const EnumAttr &enumAttr = cast<EnumAttr>(baseAttr);
+ std::vector<EnumAttrCase> cases = enumAttr.getAllCases();
+
+ body << llvm::formatv(enumAttrBeginPrinterCode,
+ (var->attr.isOptional() ? "*" : "") + var->name,
+ enumAttr.getSymbolToStringFnName());
+
+ // Get a string containing all of the cases that can't be represented with a
+ // keyword.
+ llvm::BitVector nonKeywordCases(cases.size());
+ bool hasStrCase = false;
+ for (auto it : llvm::enumerate(cases)) {
+ hasStrCase |= it.value().isStrCase();
+ if (!canFormatStringAsKeyword(it.value().getStr()))
+ nonKeywordCases.set(it.index());
+ }
+
+ // If this is a string enum, use the case string to determine which cases
+ // need to use the string form.
+ if (hasStrCase) {
+ if (nonKeywordCases.any()) {
+ body << " if (llvm::is_contained(llvm::ArrayRef<llvm::StringRef>(";
+ llvm::interleaveComma(nonKeywordCases.set_bits(), body, [&](unsigned it) {
+ body << '"' << cases[it].getStr() << '"';
+ });
+ body << ")))\n"
+ " p << '\"' << caseValueStr << '\"';\n"
+ " else\n ";
+ }
+ body << " p << caseValueStr;\n"
+ " }\n";
+ return;
+ }
+
+ // Otherwise if this is a bit enum attribute, don't allow cases that may
+ // overlap with other cases. For simplicity sake, only allow cases with a
+ // single bit value.
+ if (enumAttr.isBitEnum()) {
+ for (auto it : llvm::enumerate(cases)) {
+ int64_t value = it.value().getValue();
+ if (value < 0 || !llvm::isPowerOf2_64(value))
+ nonKeywordCases.set(it.index());
+ }
+ }
+
+ // If there are any cases that can't be used with a keyword, switch on the
+ // case value to determine when to print in the string form.
+ if (nonKeywordCases.any()) {
+ body << " switch (caseValue) {\n";
+ StringRef cppNamespace = enumAttr.getCppNamespace();
+ StringRef enumName = enumAttr.getEnumClassName();
+ for (auto it : llvm::enumerate(cases)) {
+ if (nonKeywordCases.test(it.index()))
+ continue;
+ StringRef symbol = it.value().getSymbol();
+ body << llvm::formatv(" case {0}::{1}::{2}:\n", cppNamespace, enumName,
+ llvm::isDigit(symbol.front()) ? ("_" + symbol)
+ : symbol);
+ }
+ body << " p << caseValueStr;\n"
+ " break;\n"
+ " default:\n"
+ " p << '\"' << caseValueStr << '\"';\n"
+ " break;\n"
+ " }\n"
+ " }\n";
+ return;
+ }
+
+ body << " p << caseValueStr;\n"
+ " }\n";
+}
+
void OperationFormat::genElementPrinter(Element *element, OpMethodBody &body,
Operator &op, bool &shouldEmitSpace,
bool &lastWasPunctuation) {
@@ -1714,14 +1824,8 @@ void OperationFormat::genElementPrinter(Element *element, OpMethodBody &body,
const NamedAttribute *var = attr->getVar();
// If we are formatting as an enum, symbolize the attribute as a string.
- if (canFormatEnumAttr(var)) {
- Attribute baseAttr = var->attr.getBaseAttr();
- const EnumAttr &enumAttr = cast<EnumAttr>(baseAttr);
- body << " p << '\"' << " << enumAttr.getSymbolToStringFnName() << "("
- << (var->attr.isOptional() ? "*" : "") << var->name
- << "()) << '\"';\n";
- return;
- }
+ if (canFormatEnumAttr(var))
+ return genEnumAttrPrinter(var, body);
// If we are formatting as a symbol name, handle it as a symbol name.
if (shouldFormatSymbolNameAttr(var)) {
More information about the llvm-branch-commits
mailing list