[Mlir-commits] [mlir] dec8af7 - [mlir] Move SelectOp from Standard to Arithmetic
River Riddle
llvmlistbot at llvm.org
Wed Feb 2 14:46:04 PST 2022
Author: River Riddle
Date: 2022-02-02T14:45:12-08:00
New Revision: dec8af701ff25ea967d47d196e5247306754f8e8
URL: https://github.com/llvm/llvm-project/commit/dec8af701ff25ea967d47d196e5247306754f8e8
DIFF: https://github.com/llvm/llvm-project/commit/dec8af701ff25ea967d47d196e5247306754f8e8.diff
LOG: [mlir] Move SelectOp from Standard to Arithmetic
This is part of splitting up the standard dialect. See https://llvm.discourse.group/t/standard-dialect-the-final-chapter/ for discussion.
Differential Revision: https://reviews.llvm.org/D118648
Added:
Modified:
flang/lib/Optimizer/Builder/Character.cpp
flang/lib/Optimizer/Builder/MutableBox.cpp
flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
flang/lib/Optimizer/Transforms/RewriteLoop.cpp
flang/test/Fir/loop02.fir
flang/unittests/Optimizer/Builder/Runtime/NumericTest.cpp
mlir/benchmark/python/common.py
mlir/docs/Bufferization.md
mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h
mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
mlir/include/mlir/Dialect/Tosa/Utils/CoversionUtils.h
mlir/include/mlir/IR/OpDefinition.h
mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
mlir/lib/Conversion/TosaToStandard/TosaToStandard.cpp
mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
mlir/lib/Dialect/Arithmetic/IR/CMakeLists.txt
mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
mlir/lib/Dialect/Math/Transforms/ExpandTanh.cpp
mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp
mlir/lib/Dialect/SCF/SCF.cpp
mlir/lib/Dialect/SCF/Utils/Utils.cpp
mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
mlir/lib/Dialect/StandardOps/IR/Ops.cpp
mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt
mlir/lib/Dialect/StandardOps/Utils/Utils.cpp
mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
mlir/test/Analysis/test-match-reduction.mlir
mlir/test/Conversion/AffineToStandard/lower-affine.mlir
mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
mlir/test/Conversion/SCFToOpenMP/reductions.mlir
mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
mlir/test/Conversion/TosaToStandard/tosa-to-standard.mlir
mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
mlir/test/Dialect/Affine/parallelize.mlir
mlir/test/Dialect/Arithmetic/bufferize.mlir
mlir/test/Dialect/Arithmetic/expand-ops.mlir
mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir
mlir/test/Dialect/Async/async-parallel-for-num-worker-threads.mlir
mlir/test/Dialect/GPU/all-reduce-max.mlir
mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
mlir/test/Dialect/Linalg/loops.mlir
mlir/test/Dialect/Linalg/roundtrip.mlir
mlir/test/Dialect/Linalg/vectorization.mlir
mlir/test/Dialect/Math/polynomial-approximation.mlir
mlir/test/Dialect/MemRef/expand-ops.mlir
mlir/test/Dialect/SCF/canonicalize.mlir
mlir/test/Dialect/SCF/ops.mlir
mlir/test/Dialect/SparseTensor/sparse_1d.mlir
mlir/test/Dialect/SparseTensor/sparse_2d.mlir
mlir/test/Dialect/SparseTensor/sparse_3d.mlir
mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
mlir/test/Dialect/SparseTensor/sparse_out.mlir
mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
mlir/test/Dialect/Standard/canonicalize-cf.mlir
mlir/test/Dialect/Standard/canonicalize.mlir
mlir/test/Dialect/Standard/expand-tanh.mlir
mlir/test/Dialect/Vector/vector-contract-transforms.mlir
mlir/test/Dialect/Vector/vector-transforms.mlir
mlir/test/IR/core-ops.mlir
mlir/test/IR/invalid-ops.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
mlir/test/Transforms/canonicalize-block-merge.mlir
mlir/test/Transforms/canonicalize.mlir
mlir/test/Transforms/parametric-tiling.mlir
mlir/test/Transforms/sccp-callgraph.mlir
mlir/test/Transforms/sccp.mlir
mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp
mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
Removed:
mlir/include/mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h
mlir/lib/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
mlir/test/Dialect/Standard/bufferize.mlir
################################################################################
diff --git a/flang/lib/Optimizer/Builder/Character.cpp b/flang/lib/Optimizer/Builder/Character.cpp
index 8b84170d663ad..87faa3b42c449 100644
--- a/flang/lib/Optimizer/Builder/Character.cpp
+++ b/flang/lib/Optimizer/Builder/Character.cpp
@@ -434,7 +434,7 @@ mlir::Value genMin(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value a, mlir::Value b) {
auto cmp =
builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, a, b);
- return builder.create<mlir::SelectOp>(loc, cmp, a, b);
+ return builder.create<mlir::arith::SelectOp>(loc, cmp, a, b);
}
void fir::factory::CharacterExprHelper::createAssign(
@@ -532,7 +532,8 @@ fir::CharBoxValue fir::factory::CharacterExprHelper::createSubstring(
auto zero = builder.createIntegerConstant(loc, substringLen.getType(), 0);
auto cdt = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
substringLen, zero);
- substringLen = builder.create<mlir::SelectOp>(loc, cdt, zero, substringLen);
+ substringLen =
+ builder.create<mlir::arith::SelectOp>(loc, cdt, zero, substringLen);
return {substringRef, substringLen};
}
@@ -570,8 +571,8 @@ fir::factory::CharacterExprHelper::createLenTrim(const fir::CharBoxValue &str) {
// Compute length after iteration (zero if all blanks)
mlir::Value newLen =
builder.create<arith::AddIOp>(loc, iterWhile.getResult(1), one);
- auto result =
- builder.create<mlir::SelectOp>(loc, iterWhile.getResult(0), zero, newLen);
+ auto result = builder.create<mlir::arith::SelectOp>(
+ loc, iterWhile.getResult(0), zero, newLen);
return builder.createConvert(loc, builder.getCharacterLengthType(), result);
}
diff --git a/flang/lib/Optimizer/Builder/MutableBox.cpp b/flang/lib/Optimizer/Builder/MutableBox.cpp
index 9401746966efc..3b9fe3c52b971 100644
--- a/flang/lib/Optimizer/Builder/MutableBox.cpp
+++ b/flang/lib/Optimizer/Builder/MutableBox.cpp
@@ -675,8 +675,8 @@ void fir::factory::genReallocIfNeeded(fir::FirOpBuilder &builder,
// reallocate = reallocate || previous != required
auto cmp = builder.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::ne, castPrevious, required);
- mustReallocate =
- builder.create<mlir::SelectOp>(loc, cmp, cmp, mustReallocate);
+ mustReallocate = builder.create<mlir::arith::SelectOp>(
+ loc, cmp, cmp, mustReallocate);
};
llvm::SmallVector<mlir::Value> previousLbounds;
llvm::SmallVector<mlir::Value> previousExtents =
diff --git a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
index edcb40a7f0a38..5c7da46cfc3ff 100644
--- a/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
+++ b/flang/lib/Optimizer/Builder/Runtime/Numeric.cpp
@@ -295,7 +295,8 @@ mlir::Value fir::runtime::genNearest(fir::FirOpBuilder &builder,
mlir::Value False = builder.createIntegerConstant(loc, boolTy, 0);
mlir::Value True = builder.createIntegerConstant(loc, boolTy, 1);
- mlir::Value positive = builder.create<mlir::SelectOp>(loc, cmp, True, False);
+ mlir::Value positive =
+ builder.create<mlir::arith::SelectOp>(loc, cmp, True, False);
auto args = fir::runtime::createArguments(builder, loc, funcTy, x, positive);
return builder.create<fir::CallOp>(loc, func, args).getResult(0);
diff --git a/flang/lib/Optimizer/Transforms/RewriteLoop.cpp b/flang/lib/Optimizer/Transforms/RewriteLoop.cpp
index 84e377540635e..92da77e74e8ba 100644
--- a/flang/lib/Optimizer/Transforms/RewriteLoop.cpp
+++ b/flang/lib/Optimizer/Transforms/RewriteLoop.cpp
@@ -75,7 +75,7 @@ class CfgLoopConv : public mlir::OpRewritePattern<fir::DoLoopOp> {
auto cond = rewriter.create<mlir::arith::CmpIOp>(
loc, arith::CmpIPredicate::sle, iters, zero);
auto one = rewriter.create<mlir::arith::ConstantIndexOp>(loc, 1);
- iters = rewriter.create<mlir::SelectOp>(loc, cond, one, iters);
+ iters = rewriter.create<mlir::arith::SelectOp>(loc, cond, one, iters);
}
llvm::SmallVector<mlir::Value> loopOperands;
diff --git a/flang/test/Fir/loop02.fir b/flang/test/Fir/loop02.fir
index 88155710e563d..0cb794611ecef 100644
--- a/flang/test/Fir/loop02.fir
+++ b/flang/test/Fir/loop02.fir
@@ -23,7 +23,7 @@ func private @y(%addr : !fir.ref<index>)
// CHECK: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_7:.*]] = arith.cmpi sle, %[[VAL_5]], %[[VAL_6]] : index
// CHECK: %[[VAL_8:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_9:.*]] = select %[[VAL_7]], %[[VAL_8]], %[[VAL_5]] : index
+// CHECK: %[[VAL_9:.*]] = arith.select %[[VAL_7]], %[[VAL_8]], %[[VAL_5]] : index
// CHECK: br ^bb1(%[[VAL_1]], %[[VAL_9]] : index, index)
// CHECK: ^bb1(%[[VAL_10:.*]]: index, %[[VAL_11:.*]]: index):
// CHECK: %[[VAL_12:.*]] = arith.constant 0 : index
diff --git a/flang/unittests/Optimizer/Builder/Runtime/NumericTest.cpp b/flang/unittests/Optimizer/Builder/Runtime/NumericTest.cpp
index 30368c00d4805..0941da91e71e9 100644
--- a/flang/unittests/Optimizer/Builder/Runtime/NumericTest.cpp
+++ b/flang/unittests/Optimizer/Builder/Runtime/NumericTest.cpp
@@ -56,8 +56,8 @@ void testGenNearest(fir::FirOpBuilder &builder, mlir::Type xType,
checkCallOp(nearest.getDefiningOp(), fctName, 2, /*addLocArg=*/false);
auto callOp = mlir::dyn_cast<fir::CallOp>(nearest.getDefiningOp());
mlir::Value select = callOp.getOperands()[1];
- EXPECT_TRUE(mlir::isa<mlir::SelectOp>(select.getDefiningOp()));
- auto selectOp = mlir::dyn_cast<mlir::SelectOp>(select.getDefiningOp());
+ EXPECT_TRUE(mlir::isa<mlir::arith::SelectOp>(select.getDefiningOp()));
+ auto selectOp = mlir::dyn_cast<mlir::arith::SelectOp>(select.getDefiningOp());
mlir::Value cmp = selectOp.getCondition();
EXPECT_TRUE(mlir::isa<mlir::arith::CmpFOp>(cmp.getDefiningOp()));
auto cmpOp = mlir::dyn_cast<mlir::arith::CmpFOp>(cmp.getDefiningOp());
diff --git a/mlir/benchmark/python/common.py b/mlir/benchmark/python/common.py
index 8c7e8e2f3150d..23b667e362851 100644
--- a/mlir/benchmark/python/common.py
+++ b/mlir/benchmark/python/common.py
@@ -29,7 +29,7 @@ def setup_passes(mlir_module):
f"convert-scf-to-std,"
f"func-bufferize,"
f"arith-bufferize,"
- f"builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),"
+ f"builtin.func(tensor-bufferize,finalizing-bufferize),"
f"convert-vector-to-llvm"
f"{{reassociate-fp-reductions=1 enable-index-optimizations=1}},"
f"lower-affine,"
diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md
index 1bd0188f97229..2f1d7a17c4c47 100644
--- a/mlir/docs/Bufferization.md
+++ b/mlir/docs/Bufferization.md
@@ -87,7 +87,6 @@ The code, slightly simplified and annotated, is reproduced here:
pm.addNestedPass<FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
pm.addNestedPass<FuncOp>(createSCFBufferizePass());
pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
- pm.addNestedPass<FuncOp>(createStdBufferizePass());
pm.addNestedPass<FuncOp>(createTensorBufferizePass());
pm.addPass(createFuncBufferizePass());
diff --git a/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h b/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
index 65d819f18df28..f74ace109a50a 100644
--- a/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
+++ b/mlir/include/mlir/Dialect/Arithmetic/IR/Arithmetic.h
@@ -12,6 +12,7 @@
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/Interfaces/CastInterfaces.h"
+#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Interfaces/VectorInterfaces.h"
diff --git a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
index 59f867488951e..bef9178a507a2 100644
--- a/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
+++ b/mlir/include/mlir/Dialect/Arithmetic/IR/ArithmeticOps.td
@@ -1088,7 +1088,7 @@ def Arith_CmpIOp : Arith_CompareOpOfAnyRank<"cmpi"> {
%x = arith.cmpi "eq", %lhs, %rhs : vector<4xi64>
// Generic form of the same operation.
- %x = "std.arith.cmpi"(%lhs, %rhs) {predicate = 0 : i64}
+ %x = "arith.cmpi"(%lhs, %rhs) {predicate = 0 : i64}
: (vector<4xi64>, vector<4xi64>) -> vector<4xi1>
```
}];
@@ -1161,4 +1161,55 @@ def Arith_CmpFOp : Arith_CompareOp<"cmpf"> {
let hasFolder = 1;
}
+//===----------------------------------------------------------------------===//
+// SelectOp
+//===----------------------------------------------------------------------===//
+
+def SelectOp : Arith_Op<"select", [
+ AllTypesMatch<["true_value", "false_value", "result"]>
+ ] # ElementwiseMappable.traits> {
+ let summary = "select operation";
+ let description = [{
+ The `arith.select` operation chooses one value based on a binary condition
+ supplied as its first operand. If the value of the first operand is `1`,
+ the second operand is chosen, otherwise the third operand is chosen.
+ The second and the third operand must have the same type.
+
+ The operation applies to vectors and tensors elementwise given the _shape_
+ of all operands is identical. The choice is made for each element
+ individually based on the value at the same position as the element in the
+ condition operand. If an i1 is provided as the condition, the entire vector
+ or tensor is chosen.
+
+ Example:
+
+ ```mlir
+ // Custom form of scalar selection.
+ %x = arith.select %cond, %true, %false : i32
+
+ // Generic form of the same operation.
+ %x = "arith.select"(%cond, %true, %false) : (i1, i32, i32) -> i32
+
+ // Element-wise vector selection.
+ %vx = arith.select %vcond, %vtrue, %vfalse : vector<42xi1>, vector<42xf32>
+
+ // Full vector selection.
+ %vx = arith.select %cond, %vtrue, %vfalse : vector<42xf32>
+ ```
+ }];
+
+ let arguments = (ins BoolLike:$condition,
+ AnyType:$true_value,
+ AnyType:$false_value);
+ let results = (outs AnyType:$result);
+
+ let hasCanonicalizer = 1;
+ let hasFolder = 1;
+ let hasVerifier = 1;
+
+ // FIXME: Switch this to use the declarative assembly format.
+ let printer = [{ return ::print(p, *this); }];
+ let parser = [{ return ::parse$cppClass(parser, result); }];
+}
+
#endif // ARITHMETIC_OPS
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
index e070961f9eb8a..e78d00ab2ac80 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td
@@ -146,7 +146,7 @@ def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
Note: This method can return multiple OpOperands, indicating that the
given OpResult may at runtime alias with any of the OpOperands. This
- is useful for branches and for ops such as `std.select`.
+ is useful for branches and for ops such as `arith.select`.
}],
/*retType=*/"SmallVector<OpOperand *>",
/*methodName=*/"getAliasingOpOperand",
diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
index ab0d93783b7bf..9efe4ceb21473 100644
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -456,57 +456,6 @@ def ReturnOp : Std_Op<"return", [NoSideEffect, HasParent<"FuncOp">,
let hasVerifier = 1;
}
-//===----------------------------------------------------------------------===//
-// SelectOp
-//===----------------------------------------------------------------------===//
-
-def SelectOp : Std_Op<"select", [NoSideEffect,
- AllTypesMatch<["true_value", "false_value", "result"]>,
- DeclareOpInterfaceMethods<VectorUnrollOpInterface>] #
- ElementwiseMappable.traits> {
- let summary = "select operation";
- let description = [{
- The `select` operation chooses one value based on a binary condition
- supplied as its first operand. If the value of the first operand is `1`,
- the second operand is chosen, otherwise the third operand is chosen.
- The second and the third operand must have the same type.
-
- The operation applies to vectors and tensors elementwise given the _shape_
- of all operands is identical. The choice is made for each element
- individually based on the value at the same position as the element in the
- condition operand. If an i1 is provided as the condition, the entire vector
- or tensor is chosen.
-
- The `select` operation combined with [`cmpi`](#stdcmpi-cmpiop) can be used
- to implement `min` and `max` with signed or unsigned comparison semantics.
-
- Example:
-
- ```mlir
- // Custom form of scalar selection.
- %x = select %cond, %true, %false : i32
-
- // Generic form of the same operation.
- %x = "std.select"(%cond, %true, %false) : (i1, i32, i32) -> i32
-
- // Element-wise vector selection.
- %vx = std.select %vcond, %vtrue, %vfalse : vector<42xi1>, vector<42xf32>
-
- // Full vector selection.
- %vx = std.select %cond, %vtrue, %vfalse : vector<42xf32>
- ```
- }];
-
- let arguments = (ins BoolLike:$condition,
- AnyType:$true_value,
- AnyType:$false_value);
- let results = (outs AnyType:$result);
-
- let hasCanonicalizer = 1;
- let hasFolder = 1;
- let hasVerifier = 1;
-}
-
//===----------------------------------------------------------------------===//
// SwitchOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h b/mlir/include/mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h
deleted file mode 100644
index a85acbbb195a6..0000000000000
--- a/mlir/include/mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h
+++ /dev/null
@@ -1,18 +0,0 @@
-//===- BufferizableOpInterfaceImpl.h - Impl. of BufferizableOpInterface ---===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MLIR_DIALECT_STANDARDOPS_BUFFERIZABLEOPINTERFACEIMPL_H
-#define MLIR_DIALECT_STANDARDOPS_BUFFERIZABLEOPINTERFACEIMPL_H
-
-namespace mlir {
-class DialectRegistry;
-
-void registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry);
-} // namespace mlir
-
-#endif // MLIR_DIALECT_STANDARDOPS_BUFFERIZABLEOPINTERFACEIMPL_H
diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h
index d6b8d2028e0e2..57bd00313dc2e 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h
@@ -23,9 +23,6 @@ class BufferizeTypeConverter;
class RewritePatternSet;
-/// Creates an instance of std bufferization pass.
-std::unique_ptr<Pass> createStdBufferizePass();
-
/// Creates an instance of func bufferization pass.
std::unique_ptr<Pass> createFuncBufferizePass();
diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
index 3e08865c6f71a..c871379f45087 100644
--- a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td
@@ -11,11 +11,6 @@
include "mlir/Pass/PassBase.td"
-def StdBufferize : Pass<"std-bufferize", "FuncOp"> {
- let summary = "Bufferize the std dialect";
- let constructor = "mlir::createStdBufferizePass()";
-}
-
def FuncBufferize : Pass<"func-bufferize", "ModuleOp"> {
let summary = "Bufferize func/call/return ops";
let description = [{
diff --git a/mlir/include/mlir/Dialect/Tosa/Utils/CoversionUtils.h b/mlir/include/mlir/Dialect/Tosa/Utils/CoversionUtils.h
index bbf149865f6e9..0848979be31eb 100644
--- a/mlir/include/mlir/Dialect/Tosa/Utils/CoversionUtils.h
+++ b/mlir/include/mlir/Dialect/Tosa/Utils/CoversionUtils.h
@@ -30,13 +30,14 @@ SmallVector<Value> condenseValues(const SmallVector<Value> &values);
// Takes the parameters for a clamp and turns it into a series of ops.
template <typename T, typename P>
-mlir::SelectOp clampHelper(Location loc, Value arg, arith::ConstantOp min,
- arith::ConstantOp max, P pred, OpBuilder &rewriter) {
+arith::SelectOp clampHelper(Location loc, Value arg, arith::ConstantOp min,
+ arith::ConstantOp max, P pred,
+ OpBuilder &rewriter) {
auto smallerThanMin = rewriter.create<T>(loc, pred, arg, min);
auto minOrArg =
- rewriter.create<mlir::SelectOp>(loc, smallerThanMin, min, arg);
+ rewriter.create<arith::SelectOp>(loc, smallerThanMin, min, arg);
auto largerThanMax = rewriter.create<T>(loc, pred, max, arg);
- return rewriter.create<mlir::SelectOp>(loc, largerThanMax, max, minOrArg);
+ return rewriter.create<arith::SelectOp>(loc, largerThanMax, max, minOrArg);
}
// Returns the values in an attribute as an array of values.
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index d83a7be5c0cd3..ac51c7bb51ddb 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -1368,14 +1368,14 @@ struct Elementwise : public TraitBase<ConcreteType, Elementwise> {
///
/// Example:
/// ```
-/// %tensor_select = "std.select"(%pred_tensor, %true_val, %false_val)
+/// %tensor_select = "arith.select"(%pred_tensor, %true_val, %false_val)
/// : (tensor<?xi1>, tensor<?xf32>, tensor<?xf32>)
/// -> tensor<?xf32>
/// ```
/// can be scalarized to
///
/// ```
-/// %scalar_select = "std.select"(%pred, %true_val_scalar, %false_val_scalar)
+/// %scalar_select = "arith.select"(%pred, %true_val_scalar, %false_val_scalar)
/// : (i1, f32, f32) -> f32
/// ```
template <typename ConcreteType>
@@ -1430,12 +1430,12 @@ struct Vectorizable : public TraitBase<ConcreteType, Vectorizable> {
/// ```
///
/// ```
-/// %scalar_pred = "std.select"(%pred, %true_val, %false_val)
+/// %scalar_pred = "arith.select"(%pred, %true_val, %false_val)
/// : (i1, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
/// ```
/// can be tensorized to
/// ```
-/// %tensor_pred = "std.select"(%pred, %true_val, %false_val)
+/// %tensor_pred = "arith.select"(%pred, %true_val, %false_val)
/// : (tensor<?xi1>, tensor<?xf32>, tensor<?xf32>)
/// -> tensor<?xf32>
/// ```
diff --git a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
index 8ff1134f4b7bb..3ec877a95a194 100644
--- a/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
+++ b/mlir/lib/Conversion/AffineToStandard/AffineToStandard.cpp
@@ -96,8 +96,8 @@ class AffineApplyExpander
loc, arith::CmpIPredicate::slt, remainder, zeroCst);
Value correctedRemainder =
builder.create<arith::AddIOp>(loc, remainder, rhs);
- Value result = builder.create<SelectOp>(loc, isRemainderNegative,
- correctedRemainder, remainder);
+ Value result = builder.create<arith::SelectOp>(
+ loc, isRemainderNegative, correctedRemainder, remainder);
return result;
}
@@ -134,12 +134,12 @@ class AffineApplyExpander
loc, arith::CmpIPredicate::slt, lhs, zeroCst);
Value negatedDecremented = builder.create<arith::SubIOp>(loc, noneCst, lhs);
Value dividend =
- builder.create<SelectOp>(loc, negative, negatedDecremented, lhs);
+ builder.create<arith::SelectOp>(loc, negative, negatedDecremented, lhs);
Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
Value correctedQuotient =
builder.create<arith::SubIOp>(loc, noneCst, quotient);
- Value result =
- builder.create<SelectOp>(loc, negative, correctedQuotient, quotient);
+ Value result = builder.create<arith::SelectOp>(loc, negative,
+ correctedQuotient, quotient);
return result;
}
@@ -175,14 +175,14 @@ class AffineApplyExpander
Value negated = builder.create<arith::SubIOp>(loc, zeroCst, lhs);
Value decremented = builder.create<arith::SubIOp>(loc, lhs, oneCst);
Value dividend =
- builder.create<SelectOp>(loc, nonPositive, negated, decremented);
+ builder.create<arith::SelectOp>(loc, nonPositive, negated, decremented);
Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
Value negatedQuotient =
builder.create<arith::SubIOp>(loc, zeroCst, quotient);
Value incrementedQuotient =
builder.create<arith::AddIOp>(loc, quotient, oneCst);
- Value result = builder.create<SelectOp>(loc, nonPositive, negatedQuotient,
- incrementedQuotient);
+ Value result = builder.create<arith::SelectOp>(
+ loc, nonPositive, negatedQuotient, incrementedQuotient);
return result;
}
@@ -259,7 +259,8 @@ static Value buildMinMaxReductionSeq(Location loc,
Value value = *valueIt++;
for (; valueIt != values.end(); ++valueIt) {
auto cmpOp = builder.create<arith::CmpIOp>(loc, predicate, value, *valueIt);
- value = builder.create<SelectOp>(loc, cmpOp.getResult(), value, *valueIt);
+ value = builder.create<arith::SelectOp>(loc, cmpOp.getResult(), value,
+ *valueIt);
}
return value;
diff --git a/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp b/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
index dd01fd71bc9c4..93090f7bb9bd5 100644
--- a/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
+++ b/mlir/lib/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.cpp
@@ -66,6 +66,8 @@ using FPToSIOpLowering =
VectorConvertToLLVMPattern<arith::FPToSIOp, LLVM::FPToSIOp>;
using BitcastOpLowering =
VectorConvertToLLVMPattern<arith::BitcastOp, LLVM::BitcastOp>;
+using SelectOpLowering =
+ VectorConvertToLLVMPattern<arith::SelectOp, LLVM::SelectOp>;
//===----------------------------------------------------------------------===//
// Op Lowering Patterns
@@ -292,7 +294,8 @@ void mlir::arith::populateArithmeticToLLVMConversionPatterns(
IndexCastOpLowering,
BitcastOpLowering,
CmpIOpLowering,
- CmpFOpLowering
+ CmpFOpLowering,
+ SelectOpLowering
>(converter);
// clang-format on
}
diff --git a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
index b3972a6b3b0bc..b9cdceda3d6d6 100644
--- a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
+++ b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
@@ -190,6 +190,15 @@ class CmpFOpNanNonePattern final : public OpConversionPattern<arith::CmpFOp> {
ConversionPatternRewriter &rewriter) const override;
};
+/// Converts arith.select to spv.Select.
+class SelectOpPattern final : public OpConversionPattern<arith::SelectOp> {
+public:
+ using OpConversionPattern<arith::SelectOp>::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(arith::SelectOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override;
+};
+
} // namespace
//===----------------------------------------------------------------------===//
@@ -780,6 +789,19 @@ LogicalResult CmpFOpNanNonePattern::matchAndRewrite(
return success();
}
+//===----------------------------------------------------------------------===//
+// SelectOpPattern
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+SelectOpPattern::matchAndRewrite(arith::SelectOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const {
+ rewriter.replaceOpWithNewOp<spirv::SelectOp>(op, adaptor.getCondition(),
+ adaptor.getTrueValue(),
+ adaptor.getFalseValue());
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// Pattern Population
//===----------------------------------------------------------------------===//
@@ -820,7 +842,8 @@ void mlir::arith::populateArithmeticToSPIRVPatterns(
TypeCastingOpPattern<arith::IndexCastOp, spirv::SConvertOp>,
TypeCastingOpPattern<arith::BitcastOp, spirv::BitcastOp>,
CmpIOpBooleanPattern, CmpIOpPattern,
- CmpFOpNanNonePattern, CmpFOpPattern
+ CmpFOpNanNonePattern, CmpFOpPattern,
+ SelectOpPattern
>(typeConverter, patterns.getContext());
// clang-format on
diff --git a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
index 47d132f9e8bd9..63f862dcce6bc 100644
--- a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
+++ b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
@@ -227,10 +227,10 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
Value one = rewriter.create<arith::ConstantOp>(
loc, elementType, rewriter.getFloatAttr(elementType, 1));
Value lhsRealIsInfWithSign = rewriter.create<math::CopySignOp>(
- loc, rewriter.create<SelectOp>(loc, lhsRealInfinite, one, zero),
+ loc, rewriter.create<arith::SelectOp>(loc, lhsRealInfinite, one, zero),
lhsReal);
Value lhsImagIsInfWithSign = rewriter.create<math::CopySignOp>(
- loc, rewriter.create<SelectOp>(loc, lhsImagInfinite, one, zero),
+ loc, rewriter.create<arith::SelectOp>(loc, lhsImagInfinite, one, zero),
lhsImag);
Value lhsRealIsInfWithSignTimesRhsReal =
rewriter.create<arith::MulFOp>(loc, lhsRealIsInfWithSign, rhsReal);
@@ -265,10 +265,10 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
Value finiteNumInfiniteDenom =
rewriter.create<arith::AndIOp>(loc, lhsFinite, rhsInfinite);
Value rhsRealIsInfWithSign = rewriter.create<math::CopySignOp>(
- loc, rewriter.create<SelectOp>(loc, rhsRealInfinite, one, zero),
+ loc, rewriter.create<arith::SelectOp>(loc, rhsRealInfinite, one, zero),
rhsReal);
Value rhsImagIsInfWithSign = rewriter.create<math::CopySignOp>(
- loc, rewriter.create<SelectOp>(loc, rhsImagInfinite, one, zero),
+ loc, rewriter.create<arith::SelectOp>(loc, rhsImagInfinite, one, zero),
rhsImag);
Value rhsRealIsInfWithSignTimesLhsReal =
rewriter.create<arith::MulFOp>(loc, lhsReal, rhsRealIsInfWithSign);
@@ -289,21 +289,21 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
Value realAbsSmallerThanImagAbs = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OLT, rhsRealAbs, rhsImagAbs);
- Value resultReal = rewriter.create<SelectOp>(loc, realAbsSmallerThanImagAbs,
- resultReal1, resultReal2);
- Value resultImag = rewriter.create<SelectOp>(loc, realAbsSmallerThanImagAbs,
- resultImag1, resultImag2);
- Value resultRealSpecialCase3 = rewriter.create<SelectOp>(
+ Value resultReal = rewriter.create<arith::SelectOp>(
+ loc, realAbsSmallerThanImagAbs, resultReal1, resultReal2);
+ Value resultImag = rewriter.create<arith::SelectOp>(
+ loc, realAbsSmallerThanImagAbs, resultImag1, resultImag2);
+ Value resultRealSpecialCase3 = rewriter.create<arith::SelectOp>(
loc, finiteNumInfiniteDenom, resultReal4, resultReal);
- Value resultImagSpecialCase3 = rewriter.create<SelectOp>(
+ Value resultImagSpecialCase3 = rewriter.create<arith::SelectOp>(
loc, finiteNumInfiniteDenom, resultImag4, resultImag);
- Value resultRealSpecialCase2 = rewriter.create<SelectOp>(
+ Value resultRealSpecialCase2 = rewriter.create<arith::SelectOp>(
loc, infNumFiniteDenom, resultReal3, resultRealSpecialCase3);
- Value resultImagSpecialCase2 = rewriter.create<SelectOp>(
+ Value resultImagSpecialCase2 = rewriter.create<arith::SelectOp>(
loc, infNumFiniteDenom, resultImag3, resultImagSpecialCase3);
- Value resultRealSpecialCase1 = rewriter.create<SelectOp>(
+ Value resultRealSpecialCase1 = rewriter.create<arith::SelectOp>(
loc, resultIsInfinity, infinityResultReal, resultRealSpecialCase2);
- Value resultImagSpecialCase1 = rewriter.create<SelectOp>(
+ Value resultImagSpecialCase1 = rewriter.create<arith::SelectOp>(
loc, resultIsInfinity, infinityResultImag, resultImagSpecialCase2);
Value resultRealIsNaN = rewriter.create<arith::CmpFOp>(
@@ -312,9 +312,9 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
loc, arith::CmpFPredicate::UNO, resultImag, zero);
Value resultIsNaN =
rewriter.create<arith::AndIOp>(loc, resultRealIsNaN, resultImagIsNaN);
- Value resultRealWithSpecialCases = rewriter.create<SelectOp>(
+ Value resultRealWithSpecialCases = rewriter.create<arith::SelectOp>(
loc, resultIsNaN, resultRealSpecialCase1, resultReal);
- Value resultImagWithSpecialCases = rewriter.create<SelectOp>(
+ Value resultImagWithSpecialCases = rewriter.create<arith::SelectOp>(
loc, resultIsNaN, resultImagSpecialCase1, resultImag);
rewriter.replaceOpWithNewOp<complex::CreateOp>(
@@ -450,24 +450,26 @@ struct MulOpConversion : public OpConversionPattern<complex::MulOp> {
b.create<arith::ConstantOp>(elementType, b.getZeroAttr(elementType));
Value one = b.create<arith::ConstantOp>(elementType,
b.getFloatAttr(elementType, 1));
- Value lhsRealIsInfFloat = b.create<SelectOp>(lhsRealIsInf, one, zero);
- lhsReal = b.create<SelectOp>(
+ Value lhsRealIsInfFloat =
+ b.create<arith::SelectOp>(lhsRealIsInf, one, zero);
+ lhsReal = b.create<arith::SelectOp>(
lhsIsInf, b.create<math::CopySignOp>(lhsRealIsInfFloat, lhsReal),
lhsReal);
- Value lhsImagIsInfFloat = b.create<SelectOp>(lhsImagIsInf, one, zero);
- lhsImag = b.create<SelectOp>(
+ Value lhsImagIsInfFloat =
+ b.create<arith::SelectOp>(lhsImagIsInf, one, zero);
+ lhsImag = b.create<arith::SelectOp>(
lhsIsInf, b.create<math::CopySignOp>(lhsImagIsInfFloat, lhsImag),
lhsImag);
Value lhsIsInfAndRhsRealIsNan =
b.create<arith::AndIOp>(lhsIsInf, rhsRealIsNan);
- rhsReal =
- b.create<SelectOp>(lhsIsInfAndRhsRealIsNan,
- b.create<math::CopySignOp>(zero, rhsReal), rhsReal);
+ rhsReal = b.create<arith::SelectOp>(
+ lhsIsInfAndRhsRealIsNan, b.create<math::CopySignOp>(zero, rhsReal),
+ rhsReal);
Value lhsIsInfAndRhsImagIsNan =
b.create<arith::AndIOp>(lhsIsInf, rhsImagIsNan);
- rhsImag =
- b.create<SelectOp>(lhsIsInfAndRhsImagIsNan,
- b.create<math::CopySignOp>(zero, rhsImag), rhsImag);
+ rhsImag = b.create<arith::SelectOp>(
+ lhsIsInfAndRhsImagIsNan, b.create<math::CopySignOp>(zero, rhsImag),
+ rhsImag);
// Case 2. `rhsReal` or `rhsImag` are infinite.
Value rhsRealIsInf =
@@ -479,24 +481,26 @@ struct MulOpConversion : public OpConversionPattern<complex::MulOp> {
b.create<arith::CmpFOp>(arith::CmpFPredicate::UNO, lhsReal, lhsReal);
Value lhsImagIsNan =
b.create<arith::CmpFOp>(arith::CmpFPredicate::UNO, lhsImag, lhsImag);
- Value rhsRealIsInfFloat = b.create<SelectOp>(rhsRealIsInf, one, zero);
- rhsReal = b.create<SelectOp>(
+ Value rhsRealIsInfFloat =
+ b.create<arith::SelectOp>(rhsRealIsInf, one, zero);
+ rhsReal = b.create<arith::SelectOp>(
rhsIsInf, b.create<math::CopySignOp>(rhsRealIsInfFloat, rhsReal),
rhsReal);
- Value rhsImagIsInfFloat = b.create<SelectOp>(rhsImagIsInf, one, zero);
- rhsImag = b.create<SelectOp>(
+ Value rhsImagIsInfFloat =
+ b.create<arith::SelectOp>(rhsImagIsInf, one, zero);
+ rhsImag = b.create<arith::SelectOp>(
rhsIsInf, b.create<math::CopySignOp>(rhsImagIsInfFloat, rhsImag),
rhsImag);
Value rhsIsInfAndLhsRealIsNan =
b.create<arith::AndIOp>(rhsIsInf, lhsRealIsNan);
- lhsReal =
- b.create<SelectOp>(rhsIsInfAndLhsRealIsNan,
- b.create<math::CopySignOp>(zero, lhsReal), lhsReal);
+ lhsReal = b.create<arith::SelectOp>(
+ rhsIsInfAndLhsRealIsNan, b.create<math::CopySignOp>(zero, lhsReal),
+ lhsReal);
Value rhsIsInfAndLhsImagIsNan =
b.create<arith::AndIOp>(rhsIsInf, lhsImagIsNan);
- lhsImag =
- b.create<SelectOp>(rhsIsInfAndLhsImagIsNan,
- b.create<math::CopySignOp>(zero, lhsImag), lhsImag);
+ lhsImag = b.create<arith::SelectOp>(
+ rhsIsInfAndLhsImagIsNan, b.create<math::CopySignOp>(zero, lhsImag),
+ lhsImag);
Value recalc = b.create<arith::OrIOp>(lhsIsInf, rhsIsInf);
// Case 3. One of the pairwise products of left hand side with right hand
@@ -522,24 +526,24 @@ struct MulOpConversion : public OpConversionPattern<complex::MulOp> {
isSpecialCase = b.create<arith::AndIOp>(isSpecialCase, notRecalc);
Value isSpecialCaseAndLhsRealIsNan =
b.create<arith::AndIOp>(isSpecialCase, lhsRealIsNan);
- lhsReal =
- b.create<SelectOp>(isSpecialCaseAndLhsRealIsNan,
- b.create<math::CopySignOp>(zero, lhsReal), lhsReal);
+ lhsReal = b.create<arith::SelectOp>(
+ isSpecialCaseAndLhsRealIsNan, b.create<math::CopySignOp>(zero, lhsReal),
+ lhsReal);
Value isSpecialCaseAndLhsImagIsNan =
b.create<arith::AndIOp>(isSpecialCase, lhsImagIsNan);
- lhsImag =
- b.create<SelectOp>(isSpecialCaseAndLhsImagIsNan,
- b.create<math::CopySignOp>(zero, lhsImag), lhsImag);
+ lhsImag = b.create<arith::SelectOp>(
+ isSpecialCaseAndLhsImagIsNan, b.create<math::CopySignOp>(zero, lhsImag),
+ lhsImag);
Value isSpecialCaseAndRhsRealIsNan =
b.create<arith::AndIOp>(isSpecialCase, rhsRealIsNan);
- rhsReal =
- b.create<SelectOp>(isSpecialCaseAndRhsRealIsNan,
- b.create<math::CopySignOp>(zero, rhsReal), rhsReal);
+ rhsReal = b.create<arith::SelectOp>(
+ isSpecialCaseAndRhsRealIsNan, b.create<math::CopySignOp>(zero, rhsReal),
+ rhsReal);
Value isSpecialCaseAndRhsImagIsNan =
b.create<arith::AndIOp>(isSpecialCase, rhsImagIsNan);
- rhsImag =
- b.create<SelectOp>(isSpecialCaseAndRhsImagIsNan,
- b.create<math::CopySignOp>(zero, rhsImag), rhsImag);
+ rhsImag = b.create<arith::SelectOp>(
+ isSpecialCaseAndRhsImagIsNan, b.create<math::CopySignOp>(zero, rhsImag),
+ rhsImag);
recalc = b.create<arith::OrIOp>(recalc, isSpecialCase);
recalc = b.create<arith::AndIOp>(isNan, recalc);
@@ -548,16 +552,16 @@ struct MulOpConversion : public OpConversionPattern<complex::MulOp> {
lhsImagTimesRhsImag = b.create<arith::MulFOp>(lhsImag, rhsImag);
Value newReal =
b.create<arith::SubFOp>(lhsRealTimesRhsReal, lhsImagTimesRhsImag);
- real =
- b.create<SelectOp>(recalc, b.create<arith::MulFOp>(inf, newReal), real);
+ real = b.create<arith::SelectOp>(
+ recalc, b.create<arith::MulFOp>(inf, newReal), real);
// Recalculate imag part.
lhsImagTimesRhsReal = b.create<arith::MulFOp>(lhsImag, rhsReal);
lhsRealTimesRhsImag = b.create<arith::MulFOp>(lhsReal, rhsImag);
Value newImag =
b.create<arith::AddFOp>(lhsImagTimesRhsReal, lhsRealTimesRhsImag);
- imag =
- b.create<SelectOp>(recalc, b.create<arith::MulFOp>(inf, newImag), imag);
+ imag = b.create<arith::SelectOp>(
+ recalc, b.create<arith::MulFOp>(inf, newImag), imag);
rewriter.replaceOpWithNewOp<complex::CreateOp>(op, type, real, imag);
return success();
@@ -608,8 +612,8 @@ struct SignOpConversion : public OpConversionPattern<complex::SignOp> {
Value realSign = b.create<arith::DivFOp>(real, abs);
Value imagSign = b.create<arith::DivFOp>(imag, abs);
Value sign = b.create<complex::CreateOp>(type, realSign, imagSign);
- rewriter.replaceOpWithNewOp<SelectOp>(op, isZero, adaptor.getComplex(),
- sign);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, isZero,
+ adaptor.getComplex(), sign);
return success();
}
};
diff --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index e472af257776b..a9e7759aa75e8 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -71,8 +71,9 @@ template <
static bool
matchSelectReduction(Block &block, ArrayRef<Predicate> lessThanPredicates,
ArrayRef<Predicate> greaterThanPredicates, bool &isMin) {
- static_assert(llvm::is_one_of<SelectOpTy, SelectOp, LLVM::SelectOp>::value,
- "only std and llvm select ops are supported");
+ static_assert(
+ llvm::is_one_of<SelectOpTy, arith::SelectOp, LLVM::SelectOp>::value,
+ "only arithmetic and llvm select ops are supported");
// Expect exactly three operations in the block.
if (block.empty() || llvm::hasSingleElement(block) ||
@@ -290,7 +291,7 @@ static omp::ReductionDeclareOp declareReduction(PatternRewriter &builder,
// Match select-based min/max reductions.
bool isMin;
- if (matchSelectReduction<arith::CmpFOp, SelectOp>(
+ if (matchSelectReduction<arith::CmpFOp, arith::SelectOp>(
reduction, {arith::CmpFPredicate::OLT, arith::CmpFPredicate::OLE},
{arith::CmpFPredicate::OGT, arith::CmpFPredicate::OGE}, isMin) ||
matchSelectReduction<LLVM::FCmpOp, LLVM::SelectOp>(
@@ -299,7 +300,7 @@ static omp::ReductionDeclareOp declareReduction(PatternRewriter &builder,
return createDecl(builder, symbolTable, reduce,
minMaxValueForFloat(type, !isMin));
}
- if (matchSelectReduction<arith::CmpIOp, SelectOp>(
+ if (matchSelectReduction<arith::CmpIOp, arith::SelectOp>(
reduction, {arith::CmpIPredicate::slt, arith::CmpIPredicate::sle},
{arith::CmpIPredicate::sgt, arith::CmpIPredicate::sge}, isMin) ||
matchSelectReduction<LLVM::ICmpOp, LLVM::SelectOp>(
@@ -311,7 +312,7 @@ static omp::ReductionDeclareOp declareReduction(PatternRewriter &builder,
isMin ? LLVM::AtomicBinOp::min : LLVM::AtomicBinOp::max,
decl, reduce);
}
- if (matchSelectReduction<arith::CmpIOp, SelectOp>(
+ if (matchSelectReduction<arith::CmpIOp, arith::SelectOp>(
reduction, {arith::CmpIPredicate::ult, arith::CmpIPredicate::ule},
{arith::CmpIPredicate::ugt, arith::CmpIPredicate::uge}, isMin) ||
matchSelectReduction<LLVM::ICmpOp, LLVM::SelectOp>(
diff --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
index 7e0c4a7c5cd4a..a7cf8a80fe8c2 100644
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -107,8 +107,8 @@ Value getBroadcastedDim(ImplicitLocOpBuilder lb, ValueRange extentTensors,
Value dimIsOne =
b.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
lesserRankOperandExtent, one);
- Value dim = b.create<SelectOp>(loc, dimIsOne, broadcastedDim,
- lesserRankOperandExtent);
+ Value dim = b.create<arith::SelectOp>(
+ loc, dimIsOne, broadcastedDim, lesserRankOperandExtent);
b.create<scf::YieldOp>(loc, dim);
})
.getResult(0);
@@ -144,7 +144,7 @@ LogicalResult BroadcastOpConverter::matchAndRewrite(
for (Value v : llvm::drop_begin(ranks, 1)) {
Value rankIsGreater =
lb.create<arith::CmpIOp>(arith::CmpIPredicate::ugt, v, maxRank);
- maxRank = lb.create<SelectOp>(rankIsGreater, v, maxRank);
+ maxRank = lb.create<arith::SelectOp>(rankIsGreater, v, maxRank);
}
// Calculate the difference of ranks and the maximum rank for later offsets.
@@ -259,7 +259,7 @@ LogicalResult IsBroadcastableOpConverter::matchAndRewrite(
for (Value v : llvm::drop_begin(ranks, 1)) {
Value rankIsGreater =
lb.create<arith::CmpIOp>(arith::CmpIPredicate::ugt, v, maxRank);
- maxRank = lb.create<SelectOp>(rankIsGreater, v, maxRank);
+ maxRank = lb.create<arith::SelectOp>(rankIsGreater, v, maxRank);
}
// Calculate the difference of ranks and the maximum rank for later offsets.
@@ -619,7 +619,7 @@ LogicalResult SplitAtOpConversion::matchAndRewrite(
Value add = b.create<arith::AddIOp>(originalIndex, rank);
Value indexIsNegative =
b.create<arith::CmpIOp>(arith::CmpIPredicate::slt, originalIndex, zero);
- Value index = b.create<SelectOp>(indexIsNegative, add, originalIndex);
+ Value index = b.create<arith::SelectOp>(indexIsNegative, add, originalIndex);
Value one = b.create<arith::ConstantIndexOp>(1);
Value head =
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index 467e51c5d5fad..b2e18ab8196f4 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -387,9 +387,6 @@ struct BarePtrFuncOpConversion : public FuncOpConversionBase {
}
};
-// Straightforward lowerings.
-using SelectOpLowering = VectorConvertToLLVMPattern<SelectOp, LLVM::SelectOp>;
-
/// Lower `std.assert`. The default lowering calls the `abort` function if the
/// assertion is violated and has no effect otherwise. The failure message is
/// ignored by the default lowering but should be propagated by any custom
@@ -685,7 +682,6 @@ void mlir::populateStdToLLVMConversionPatterns(LLVMTypeConverter &converter,
CondBranchOpLowering,
ConstantOpLowering,
ReturnOpLowering,
- SelectOpLowering,
SwitchOpLowering>(converter);
// clang-format on
}
diff --git a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
index 003b5cc75f97a..7e1269013af58 100644
--- a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRV.cpp
@@ -46,15 +46,6 @@ class ReturnOpPattern final : public OpConversionPattern<ReturnOp> {
ConversionPatternRewriter &rewriter) const override;
};
-/// Converts std.select to spv.Select.
-class SelectOpPattern final : public OpConversionPattern<SelectOp> {
-public:
- using OpConversionPattern<SelectOp>::OpConversionPattern;
- LogicalResult
- matchAndRewrite(SelectOp op, OpAdaptor adaptor,
- ConversionPatternRewriter &rewriter) const override;
-};
-
/// Converts std.br to spv.Branch.
struct BranchOpPattern final : public OpConversionPattern<BranchOp> {
using OpConversionPattern<BranchOp>::OpConversionPattern;
@@ -155,19 +146,6 @@ ReturnOpPattern::matchAndRewrite(ReturnOp returnOp, OpAdaptor adaptor,
return success();
}
-//===----------------------------------------------------------------------===//
-// SelectOp
-//===----------------------------------------------------------------------===//
-
-LogicalResult
-SelectOpPattern::matchAndRewrite(SelectOp op, OpAdaptor adaptor,
- ConversionPatternRewriter &rewriter) const {
- rewriter.replaceOpWithNewOp<spirv::SelectOp>(op, adaptor.getCondition(),
- adaptor.getTrueValue(),
- adaptor.getFalseValue());
- return success();
-}
-
//===----------------------------------------------------------------------===//
// BranchOpPattern
//===----------------------------------------------------------------------===//
@@ -211,8 +189,8 @@ void populateStandardToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
spirv::ElementwiseOpPattern<arith::MinSIOp, spirv::GLSLSMinOp>,
spirv::ElementwiseOpPattern<arith::MinUIOp, spirv::GLSLUMinOp>,
- ReturnOpPattern, SelectOpPattern, BranchOpPattern, CondBranchOpPattern>(
- typeConverter, context);
+ ReturnOpPattern, BranchOpPattern, CondBranchOpPattern>(typeConverter,
+ context);
}
void populateTensorToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 8d98fb0a3a93b..dad77e6612db0 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -59,7 +59,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
auto cmp = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt,
args[0], zero);
auto neg = rewriter.create<arith::SubIOp>(loc, zero, args[0]);
- return rewriter.create<mlir::SelectOp>(loc, cmp, args[0], neg);
+ return rewriter.create<arith::SelectOp>(loc, cmp, args[0], neg);
}
// tosa::AddOp
@@ -380,33 +380,33 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
if (isa<tosa::SelectOp>(op)) {
elementTy = op->getOperand(1).getType().cast<ShapedType>().getElementType();
if (elementTy.isa<FloatType>() || elementTy.isa<IntegerType>())
- return rewriter.create<mlir::SelectOp>(loc, args[0], args[1], args[2]);
+ return rewriter.create<arith::SelectOp>(loc, args[0], args[1], args[2]);
}
// tosa::MaximumOp
if (isa<tosa::MaximumOp>(op) && elementTy.isa<FloatType>()) {
auto predicate = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OGT, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::MaximumOp>(op) && elementTy.isSignlessInteger()) {
auto predicate = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::sgt, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
// tosa::MinimumOp
if (isa<tosa::MinimumOp>(op) && elementTy.isa<FloatType>()) {
auto predicate = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OLT, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::MinimumOp>(op) && elementTy.isSignlessInteger()) {
auto predicate = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::slt, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
// tosa::CeilOp
@@ -558,7 +558,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
auto negative = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OLT, args[0], zero);
auto rounded =
- rewriter.create<mlir::SelectOp>(loc, negative, subbed, added);
+ rewriter.create<arith::SelectOp>(loc, negative, subbed, added);
auto clamped = clampHelper<arith::CmpFOp>(
loc, rounded, intMin, intMax, arith::CmpFPredicate::OLT, rewriter);
@@ -792,25 +792,25 @@ static Value createLinalgBodyCalculationForReduceOp(Operation *op,
if (isa<tosa::ReduceMinOp>(op) && elementTy.isa<FloatType>()) {
auto predicate = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OLT, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::ReduceMinOp>(op) && elementTy.isa<IntegerType>()) {
auto predicate = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::slt, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::ReduceMaxOp>(op) && elementTy.isa<FloatType>()) {
auto predicate = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OGT, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::ReduceMaxOp>(op) && elementTy.isa<IntegerType>()) {
auto predicate = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::sgt, args[0], args[1]);
- return rewriter.create<mlir::SelectOp>(loc, predicate, args[0], args[1]);
+ return rewriter.create<arith::SelectOp>(loc, predicate, args[0], args[1]);
}
if (isa<tosa::ReduceAllOp>(op) && elementTy.isInteger(1))
@@ -1525,9 +1525,9 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
loc, rewriter.getI32IntegerAttr(1));
auto yOffset =
- rewriter.create<mlir::SelectOp>(loc, yPred, oneVal, zeroVal);
+ rewriter.create<arith::SelectOp>(loc, yPred, oneVal, zeroVal);
auto xOffset =
- rewriter.create<mlir::SelectOp>(loc, xPred, oneVal, zeroVal);
+ rewriter.create<arith::SelectOp>(loc, xPred, oneVal, zeroVal);
iy = rewriter.create<arith::AddIOp>(loc, iy, yOffset);
ix = rewriter.create<arith::AddIOp>(loc, ix, xOffset);
@@ -2052,9 +2052,9 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
return;
}
- auto resultMax = rewriter.create<mlir::SelectOp>(nestedLoc, predicate,
- newValue, oldValue);
- auto resultIndex = rewriter.create<mlir::SelectOp>(
+ auto resultMax = rewriter.create<arith::SelectOp>(
+ nestedLoc, predicate, newValue, oldValue);
+ auto resultIndex = rewriter.create<arith::SelectOp>(
nestedLoc, predicate, newIndex, oldIndex);
nestedBuilder.create<linalg::YieldOp>(
nestedLoc, ValueRange({resultIndex, resultMax}));
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index 4fcd2cf56c35c..a7fe498e98714 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -748,7 +748,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
Value cmp = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::slt, dx, zero);
- Value offset = rewriter.create<mlir::SelectOp>(loc, cmp, dx, zero);
+ Value offset = rewriter.create<arith::SelectOp>(loc, cmp, dx, zero);
return rewriter.create<arith::AddIOp>(loc, v, offset)->getResult(0);
};
@@ -758,7 +758,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
auto kH2 = padFn(kH1, y1, pad[3]);
auto kHCmp = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::slt, kH2, one);
- auto kH3 = rewriter.create<mlir::SelectOp>(loc, kHCmp, one, kH2);
+ auto kH3 = rewriter.create<arith::SelectOp>(loc, kHCmp, one, kH2);
// compute the horizontal component of coverage.
auto kW0 = rewriter.create<arith::ConstantIndexOp>(loc, kernel[1]);
@@ -766,7 +766,7 @@ class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
auto kW2 = padFn(kW1, x1, pad[5]);
auto kWCmp = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::slt, kW2, one);
- auto kW3 = rewriter.create<mlir::SelectOp>(loc, kWCmp, one, kW2);
+ auto kW3 = rewriter.create<arith::SelectOp>(loc, kWCmp, one, kW2);
// Compute the total number of elements and normalize.
Value count = rewriter.create<arith::MulIOp>(loc, kH3, kW3);
diff --git a/mlir/lib/Conversion/TosaToStandard/TosaToStandard.cpp b/mlir/lib/Conversion/TosaToStandard/TosaToStandard.cpp
index a150c2c2d8ae7..6e1c4e99165f7 100644
--- a/mlir/lib/Conversion/TosaToStandard/TosaToStandard.cpp
+++ b/mlir/lib/Conversion/TosaToStandard/TosaToStandard.cpp
@@ -135,7 +135,7 @@ class ApplyScaleOpConverter : public OpRewritePattern<tosa::ApplyScaleOp> {
Value valueGreaterThanZero = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::sge, value32, zero32);
- Value doubleRound64 = rewriter.create<mlir::SelectOp>(
+ Value doubleRound64 = rewriter.create<arith::SelectOp>(
loc, valueGreaterThanZero, roundAdd64, roundSub64);
// We only perform double rounding if the shift value is greater than 32.
@@ -143,8 +143,8 @@ class ApplyScaleOpConverter : public OpRewritePattern<tosa::ApplyScaleOp> {
loc, getConstantAttr(i32Ty, 32, rewriter));
Value shiftGreaterThanThirtyTwo = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::sge, shift32, thirtyTwo32);
- round64 = rewriter.create<mlir::SelectOp>(loc, shiftGreaterThanThirtyTwo,
- doubleRound64, round64);
+ round64 = rewriter.create<arith::SelectOp>(loc, shiftGreaterThanThirtyTwo,
+ doubleRound64, round64);
}
// The computation below equates to the following pseudocode:
diff --git a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
index 6073058704fb8..fafd7a47c5c5b 100644
--- a/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
+++ b/mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
@@ -1436,8 +1436,8 @@ static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
Value result = newYieldOp->getOperand(i);
Value iterArg = cast<AffineForOp>(newParentOp).getRegionIterArgs()[i];
- Value maskedResult = state.builder.create<SelectOp>(result.getLoc(), mask,
- result, iterArg);
+ Value maskedResult = state.builder.create<arith::SelectOp>(
+ result.getLoc(), mask, result, iterArg);
LLVM_DEBUG(
dbgs() << "\n[early-vect]+++++ masking a yielded vector value: "
<< maskedResult);
diff --git a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
index a4b9509d61690..c836444ffb6c8 100644
--- a/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
+++ b/mlir/lib/Dialect/Arithmetic/IR/ArithmeticOps.cpp
@@ -1392,6 +1392,173 @@ OpFoldResult arith::CmpFOp::fold(ArrayRef<Attribute> operands) {
return BoolAttr::get(getContext(), val);
}
+//===----------------------------------------------------------------------===//
+// SelectOp
+//===----------------------------------------------------------------------===//
+
+// Transforms a select of a boolean to arithmetic operations
+//
+// arith.select %arg, %x, %y : i1
+//
+// becomes
+//
+// and(%arg, %x) or and(!%arg, %y)
+struct SelectI1Simplify : public OpRewritePattern<arith::SelectOp> {
+ using OpRewritePattern<arith::SelectOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(arith::SelectOp op,
+ PatternRewriter &rewriter) const override {
+ if (!op.getType().isInteger(1))
+ return failure();
+
+ Value falseConstant =
+ rewriter.create<arith::ConstantIntOp>(op.getLoc(), true, 1);
+ Value notCondition = rewriter.create<arith::XOrIOp>(
+ op.getLoc(), op.getCondition(), falseConstant);
+
+ Value trueVal = rewriter.create<arith::AndIOp>(
+ op.getLoc(), op.getCondition(), op.getTrueValue());
+ Value falseVal = rewriter.create<arith::AndIOp>(op.getLoc(), notCondition,
+ op.getFalseValue());
+ rewriter.replaceOpWithNewOp<arith::OrIOp>(op, trueVal, falseVal);
+ return success();
+ }
+};
+
+// select %arg, %c1, %c0 => extui %arg
+struct SelectToExtUI : public OpRewritePattern<arith::SelectOp> {
+ using OpRewritePattern<arith::SelectOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(arith::SelectOp op,
+ PatternRewriter &rewriter) const override {
+ // Cannot extui i1 to i1, or i1 to f32
+ if (!op.getType().isa<IntegerType>() || op.getType().isInteger(1))
+ return failure();
+
+ // select %x, c1, %c0 => extui %arg
+ if (matchPattern(op.getTrueValue(), m_One()))
+ if (matchPattern(op.getFalseValue(), m_Zero())) {
+ rewriter.replaceOpWithNewOp<arith::ExtUIOp>(op, op.getType(),
+ op.getCondition());
+ return success();
+ }
+
+ // select %x, c0, %c1 => extui (xor %arg, true)
+ if (matchPattern(op.getTrueValue(), m_Zero()))
+ if (matchPattern(op.getFalseValue(), m_One())) {
+ rewriter.replaceOpWithNewOp<arith::ExtUIOp>(
+ op, op.getType(),
+ rewriter.create<arith::XOrIOp>(
+ op.getLoc(), op.getCondition(),
+ rewriter.create<arith::ConstantIntOp>(
+ op.getLoc(), 1, op.getCondition().getType())));
+ return success();
+ }
+
+ return failure();
+ }
+};
+
+void arith::SelectOp::getCanonicalizationPatterns(RewritePatternSet &results,
+ MLIRContext *context) {
+ results.insert<SelectI1Simplify, SelectToExtUI>(context);
+}
+
+OpFoldResult arith::SelectOp::fold(ArrayRef<Attribute> operands) {
+ Value trueVal = getTrueValue();
+ Value falseVal = getFalseValue();
+ if (trueVal == falseVal)
+ return trueVal;
+
+ Value condition = getCondition();
+
+ // select true, %0, %1 => %0
+ if (matchPattern(condition, m_One()))
+ return trueVal;
+
+ // select false, %0, %1 => %1
+ if (matchPattern(condition, m_Zero()))
+ return falseVal;
+
+ // select %x, true, false => %x
+ if (getType().isInteger(1))
+ if (matchPattern(getTrueValue(), m_One()))
+ if (matchPattern(getFalseValue(), m_Zero()))
+ return condition;
+
+ if (auto cmp = dyn_cast_or_null<arith::CmpIOp>(condition.getDefiningOp())) {
+ auto pred = cmp.getPredicate();
+ if (pred == arith::CmpIPredicate::eq || pred == arith::CmpIPredicate::ne) {
+ auto cmpLhs = cmp.getLhs();
+ auto cmpRhs = cmp.getRhs();
+
+ // %0 = arith.cmpi eq, %arg0, %arg1
+ // %1 = arith.select %0, %arg0, %arg1 => %arg1
+
+ // %0 = arith.cmpi ne, %arg0, %arg1
+ // %1 = arith.select %0, %arg0, %arg1 => %arg0
+
+ if ((cmpLhs == trueVal && cmpRhs == falseVal) ||
+ (cmpRhs == trueVal && cmpLhs == falseVal))
+ return pred == arith::CmpIPredicate::ne ? trueVal : falseVal;
+ }
+ }
+ return nullptr;
+}
+
+static void print(OpAsmPrinter &p, arith::SelectOp op) {
+ p << " " << op.getOperands();
+ p.printOptionalAttrDict(op->getAttrs());
+ p << " : ";
+ if (ShapedType condType = op.getCondition().getType().dyn_cast<ShapedType>())
+ p << condType << ", ";
+ p << op.getType();
+}
+
+static ParseResult parseSelectOp(OpAsmParser &parser, OperationState &result) {
+ Type conditionType, resultType;
+ SmallVector<OpAsmParser::OperandType, 3> operands;
+ if (parser.parseOperandList(operands, /*requiredOperandCount=*/3) ||
+ parser.parseOptionalAttrDict(result.attributes) ||
+ parser.parseColonType(resultType))
+ return failure();
+
+ // Check for the explicit condition type if this is a masked tensor or vector.
+ if (succeeded(parser.parseOptionalComma())) {
+ conditionType = resultType;
+ if (parser.parseType(resultType))
+ return failure();
+ } else {
+ conditionType = parser.getBuilder().getI1Type();
+ }
+
+ result.addTypes(resultType);
+ return parser.resolveOperands(operands,
+ {conditionType, resultType, resultType},
+ parser.getNameLoc(), result.operands);
+}
+
+LogicalResult arith::SelectOp::verify() {
+ Type conditionType = getCondition().getType();
+ if (conditionType.isSignlessInteger(1))
+ return success();
+
+ // If the result type is a vector or tensor, the type can be a mask with the
+ // same elements.
+ Type resultType = getType();
+ if (!resultType.isa<TensorType, VectorType>())
+ return emitOpError() << "expected condition to be a signless i1, but got "
+ << conditionType;
+ Type shapedConditionType = getI1SameShape(resultType);
+ if (conditionType != shapedConditionType) {
+ return emitOpError() << "expected condition type to have the same shape "
+ "as the result type, expected "
+ << shapedConditionType << ", but got "
+ << conditionType;
+ }
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// Atomic Enum
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Arithmetic/IR/CMakeLists.txt b/mlir/lib/Dialect/Arithmetic/IR/CMakeLists.txt
index 211972e777a8b..bda294173c128 100644
--- a/mlir/lib/Dialect/Arithmetic/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Arithmetic/IR/CMakeLists.txt
@@ -14,5 +14,6 @@ add_mlir_dialect_library(MLIRArithmetic
LINK_LIBS PUBLIC
MLIRDialect
+ MLIRInferTypeOpInterface
MLIRIR
)
diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
index 6073ad9c7b361..c4e1632d8023a 100644
--- a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -14,12 +14,10 @@
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
+using namespace mlir;
using namespace mlir::bufferization;
-namespace mlir {
-namespace arith {
namespace {
-
/// Bufferization of arith.constant. Replace with memref.get_global.
struct ConstantOpInterface
: public BufferizableOpInterface::ExternalModel<ConstantOpInterface,
@@ -102,12 +100,61 @@ struct IndexCastOpInterface
}
};
+/// Bufferization of arith.select. Just replace the operands.
+struct SelectOpInterface
+ : public BufferizableOpInterface::ExternalModel<SelectOpInterface,
+ arith::SelectOp> {
+ bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
+ const BufferizationState &state) const {
+ return false;
+ }
+
+ bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
+ const BufferizationState &state) const {
+ return false;
+ }
+
+ OpResult getAliasingOpResult(Operation *op, OpOperand &opOperand,
+ const BufferizationState &state) const {
+ return op->getOpResult(0) /*result*/;
+ }
+
+ SmallVector<OpOperand *>
+ getAliasingOpOperand(Operation *op, OpResult opResult,
+ const BufferizationState &state) const {
+ return {&op->getOpOperand(1) /*true_value*/,
+ &op->getOpOperand(2) /*false_value*/};
+ }
+
+ LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
+ const BufferizationState &state) const {
+ auto selectOp = cast<arith::SelectOp>(op);
+
+ // `getBuffer` introduces copies if an OpOperand bufferizes out-of-place.
+ // TODO: It would be more efficient to copy the result of the `select` op
+ // instead of its OpOperands. In the worst case, 2 copies are inserted at
+ // the moment (one for each tensor). When copying the op result, only one
+ // copy would be needed.
+ Value trueBuffer =
+ *state.getBuffer(rewriter, selectOp->getOpOperand(1) /*true_value*/);
+ Value falseBuffer =
+ *state.getBuffer(rewriter, selectOp->getOpOperand(2) /*false_value*/);
+ replaceOpWithNewBufferizedOp<arith::SelectOp>(
+ rewriter, op, selectOp.getCondition(), trueBuffer, falseBuffer);
+ return success();
+ }
+
+ BufferRelation bufferRelation(Operation *op, OpResult opResult,
+ const BufferizationState &state) const {
+ return BufferRelation::None;
+ }
+};
+
} // namespace
-} // namespace arith
-} // namespace mlir
void mlir::arith::registerBufferizableOpInterfaceExternalModels(
DialectRegistry ®istry) {
registry.addOpInterface<ConstantOp, ConstantOpInterface>();
registry.addOpInterface<IndexCastOp, IndexCastOpInterface>();
+ registry.addOpInterface<SelectOp, SelectOpInterface>();
}
diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
index 127b27effd8db..42ab36297be49 100644
--- a/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/Arithmetic/Transforms/ExpandOps.cpp
@@ -40,7 +40,7 @@ struct CeilDivUIOpConverter : public OpRewritePattern<arith::CeilDivUIOp> {
Value minusOne = rewriter.create<arith::SubIOp>(loc, a, one);
Value quotient = rewriter.create<arith::DivUIOp>(loc, minusOne, b);
Value plusOne = rewriter.create<arith::AddIOp>(loc, quotient, one);
- rewriter.replaceOpWithNewOp<SelectOp>(op, compare, zero, plusOne);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, compare, zero, plusOne);
return success();
}
};
@@ -62,7 +62,7 @@ struct CeilDivSIOpConverter : public OpRewritePattern<arith::CeilDivSIOp> {
// Compute x = (b>0) ? -1 : 1.
Value compare =
rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt, b, zero);
- Value x = rewriter.create<SelectOp>(loc, compare, minusOne, plusOne);
+ Value x = rewriter.create<arith::SelectOp>(loc, compare, minusOne, plusOne);
// Compute positive res: 1 + ((x+a)/b).
Value xPlusA = rewriter.create<arith::AddIOp>(loc, x, a);
Value xPlusADivB = rewriter.create<arith::DivSIOp>(loc, xPlusA, b);
@@ -91,7 +91,8 @@ struct CeilDivSIOpConverter : public OpRewritePattern<arith::CeilDivSIOp> {
Value compareRes =
rewriter.create<arith::OrIOp>(loc, firstTerm, secondTerm);
// Perform substitution and return success.
- rewriter.replaceOpWithNewOp<SelectOp>(op, compareRes, posRes, negRes);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, compareRes, posRes,
+ negRes);
return success();
}
};
@@ -113,7 +114,7 @@ struct FloorDivSIOpConverter : public OpRewritePattern<arith::FloorDivSIOp> {
// Compute x = (b<0) ? 1 : -1.
Value compare =
rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, b, zero);
- Value x = rewriter.create<SelectOp>(loc, compare, plusOne, minusOne);
+ Value x = rewriter.create<arith::SelectOp>(loc, compare, plusOne, minusOne);
// Compute negative res: -1 - ((x-a)/b).
Value xMinusA = rewriter.create<arith::SubIOp>(loc, x, a);
Value xMinusADivB = rewriter.create<arith::DivSIOp>(loc, xMinusA, b);
@@ -140,7 +141,8 @@ struct FloorDivSIOpConverter : public OpRewritePattern<arith::FloorDivSIOp> {
Value compareRes =
rewriter.create<arith::OrIOp>(loc, firstTerm, secondTerm);
// Perform substitution and return success.
- rewriter.replaceOpWithNewOp<SelectOp>(op, compareRes, negRes, posRes);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, compareRes, negRes,
+ posRes);
return success();
}
};
@@ -161,12 +163,12 @@ struct MaxMinFOpConverter : public OpRewritePattern<OpTy> {
pred == arith::CmpFPredicate::ULT,
"pred must be either UGT or ULT");
Value cmp = rewriter.create<arith::CmpFOp>(loc, pred, lhs, rhs);
- Value select = rewriter.create<SelectOp>(loc, cmp, lhs, rhs);
+ Value select = rewriter.create<arith::SelectOp>(loc, cmp, lhs, rhs);
// Handle the case where rhs is NaN: 'isNaN(rhs) ? rhs : select'.
Value isNaN = rewriter.create<arith::CmpFOp>(loc, arith::CmpFPredicate::UNO,
rhs, rhs);
- rewriter.replaceOpWithNewOp<SelectOp>(op, isNaN, rhs, select);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, isNaN, rhs, select);
return success();
}
};
@@ -182,7 +184,7 @@ struct MaxMinIOpConverter : public OpRewritePattern<OpTy> {
Location loc = op.getLoc();
Value cmp = rewriter.create<arith::CmpIOp>(loc, pred, lhs, rhs);
- rewriter.replaceOpWithNewOp<SelectOp>(op, cmp, lhs, rhs);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, cmp, lhs, rhs);
return success();
}
};
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
index e87b5157ed059..e1c91fbbc1d98 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
@@ -404,12 +404,12 @@ static ParallelComputeFunction createParallelComputeFunction(
} else {
// Select nested loop lower/upper bounds depending on our position in
// the multi-dimensional iteration space.
- auto lb = nb.create<SelectOp>(isBlockFirstCoord[loopIdx],
- blockFirstCoord[loopIdx + 1], c0);
+ auto lb = nb.create<arith::SelectOp>(
+ isBlockFirstCoord[loopIdx], blockFirstCoord[loopIdx + 1], c0);
- auto ub = nb.create<SelectOp>(isBlockLastCoord[loopIdx],
- blockEndCoord[loopIdx + 1],
- tripCounts[loopIdx + 1]);
+ auto ub = nb.create<arith::SelectOp>(isBlockLastCoord[loopIdx],
+ blockEndCoord[loopIdx + 1],
+ tripCounts[loopIdx + 1]);
nb.create<scf::ForOp>(lb, ub, c1, ValueRange(),
workLoopBuilder(loopIdx + 1));
@@ -831,8 +831,8 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
arith::CmpIPredicate::sgt, numWorkerThreadsVal, bracketBegin);
Value bracketScalingFactor = b.create<arith::ConstantFloatOp>(
llvm::APFloat(p.second), b.getF32Type());
- scalingFactor =
- b.create<SelectOp>(inBracket, bracketScalingFactor, scalingFactor);
+ scalingFactor = b.create<arith::SelectOp>(inBracket, bracketScalingFactor,
+ scalingFactor);
}
Value numWorkersIndex =
b.create<arith::IndexCastOp>(numWorkerThreadsVal, b.getI32Type());
diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
index ddb5bcd7ebeb9..4a7f80ee0d535 100644
--- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp
@@ -258,7 +258,7 @@ struct GpuAllReduceRewriter {
AccumulatorFactory getCmpFactory() const {
return [&](Value lhs, Value rhs) {
Value cmp = rewriter.create<T>(loc, predicate, lhs, rhs);
- return rewriter.create<SelectOp>(loc, cmp, lhs, rhs);
+ return rewriter.create<arith::SelectOp>(loc, cmp, lhs, rhs);
};
}
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index e3d9101ded857..3f93b3af00260 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -59,7 +59,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
/// operations may not have side-effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isSinkingBeneficiary(Operation *op) {
- return isa<arith::ConstantOp, ConstantOp, memref::DimOp, SelectOp,
+ return isa<arith::ConstantOp, ConstantOp, memref::DimOp, arith::SelectOp,
arith::CmpIOp>(op);
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
index 159dfe49d6b70..7597ff8be1ff5 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
@@ -17,7 +17,6 @@
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SCF/BufferizableOpInterfaceImpl.h"
-#include "mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Pass/Pass.h"
@@ -56,7 +55,6 @@ struct LinalgComprehensiveModuleBufferize
linalg_ext::registerBufferizableOpInterfaceExternalModels(registry);
scf::registerBufferizableOpInterfaceExternalModels(registry);
std_ext::registerModuleBufferizationExternalModels(registry);
- mlir::registerBufferizableOpInterfaceExternalModels(registry);
tensor::registerBufferizableOpInterfaceExternalModels(registry);
vector::registerBufferizableOpInterfaceExternalModels(registry);
}
diff --git a/mlir/lib/Dialect/Math/Transforms/ExpandTanh.cpp b/mlir/lib/Dialect/Math/Transforms/ExpandTanh.cpp
index 3c3d7a80b6bf1..9416ac544f176 100644
--- a/mlir/lib/Dialect/Math/Transforms/ExpandTanh.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/ExpandTanh.cpp
@@ -49,7 +49,8 @@ static LogicalResult convertTanhOp(math::TanhOp op, PatternRewriter &rewriter) {
Value zero = rewriter.create<arith::ConstantOp>(loc, floatZero);
Value cmpRes = rewriter.create<arith::CmpFOp>(loc, arith::CmpFPredicate::OGE,
op.getOperand(), zero);
- rewriter.replaceOpWithNewOp<SelectOp>(op, cmpRes, positiveRes, negativeRes);
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, cmpRes, positiveRes,
+ negativeRes);
return success();
}
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index b9381738c35ba..1237f0b47cf72 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -179,12 +179,12 @@ static Value f32FromBits(ImplicitLocOpBuilder &builder, uint32_t bits) {
//----------------------------------------------------------------------------//
static Value min(ImplicitLocOpBuilder &builder, Value a, Value b) {
- return builder.create<SelectOp>(
+ return builder.create<arith::SelectOp>(
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, a, b), a, b);
}
static Value max(ImplicitLocOpBuilder &builder, Value a, Value b) {
- return builder.create<SelectOp>(
+ return builder.create<arith::SelectOp>(
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, a, b), a, b);
}
@@ -311,7 +311,7 @@ AtanApproximation::matchAndRewrite(math::AtanOp op,
Value reciprocal = builder.create<arith::DivFOp>(one, abs);
Value compare =
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, abs, reciprocal);
- Value x = builder.create<SelectOp>(compare, abs, reciprocal);
+ Value x = builder.create<arith::SelectOp>(compare, abs, reciprocal);
// Perform the Taylor series approximation for atan over the range
// [-1.0, 1.0].
@@ -328,7 +328,7 @@ AtanApproximation::matchAndRewrite(math::AtanOp op,
// Remap the solution for over [0.0, 1.0] to [0.0, inf]
auto halfPi = broadcast(builder, f32Cst(builder, 1.57079632679f), shape);
Value sub = builder.create<arith::SubFOp>(halfPi, p);
- Value select = builder.create<SelectOp>(compare, p, sub);
+ Value select = builder.create<arith::SelectOp>(compare, p, sub);
// Correct for signing of the input.
rewriter.replaceOpWithNewOp<math::CopySignOp>(op, select, operand);
@@ -371,11 +371,11 @@ Atan2Approximation::matchAndRewrite(math::Atan2Op op,
auto subPi = builder.create<arith::SubFOp>(atan, pi);
auto atanGt =
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, atan, zero);
- auto flippedAtan = builder.create<SelectOp>(atanGt, subPi, addPi);
+ auto flippedAtan = builder.create<arith::SelectOp>(atanGt, subPi, addPi);
// Determine whether to directly use atan or use the 180 degree flip
auto xGt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, x, zero);
- Value result = builder.create<SelectOp>(xGt, atan, flippedAtan);
+ Value result = builder.create<arith::SelectOp>(xGt, atan, flippedAtan);
// Handle x = 0, y > 0
Value xZero =
@@ -383,22 +383,22 @@ Atan2Approximation::matchAndRewrite(math::Atan2Op op,
Value yGt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, y, zero);
Value isHalfPi = builder.create<arith::AndIOp>(xZero, yGt);
auto halfPi = broadcast(builder, f32Cst(builder, 1.57079632679f), shape);
- result = builder.create<SelectOp>(isHalfPi, halfPi, result);
+ result = builder.create<arith::SelectOp>(isHalfPi, halfPi, result);
// Handle x = 0, y < 0
Value yLt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, y, zero);
Value isNegativeHalfPiPi = builder.create<arith::AndIOp>(xZero, yLt);
auto negativeHalfPiPi =
broadcast(builder, f32Cst(builder, -1.57079632679f), shape);
- result =
- builder.create<SelectOp>(isNegativeHalfPiPi, negativeHalfPiPi, result);
+ result = builder.create<arith::SelectOp>(isNegativeHalfPiPi, negativeHalfPiPi,
+ result);
// Handle x = 0, y = 0;
Value yZero =
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, y, zero);
Value isNan = builder.create<arith::AndIOp>(xZero, yZero);
Value cstNan = broadcast(builder, f32FromBits(builder, 0x7fc00000), shape);
- result = builder.create<SelectOp>(isNan, cstNan, result);
+ result = builder.create<arith::SelectOp>(isNan, cstNan, result);
rewriter.replaceOp(op, result);
return success();
@@ -475,8 +475,8 @@ TanhApproximation::matchAndRewrite(math::TanhOp op,
q = builder.create<math::FmaOp>(x2, q, beta0);
// Divide the numerator by the denominator.
- Value res = builder.create<SelectOp>(tinyMask, x,
- builder.create<arith::DivFOp>(p, q));
+ Value res = builder.create<arith::SelectOp>(
+ tinyMask, x, builder.create<arith::DivFOp>(p, q));
rewriter.replaceOp(op, res);
@@ -561,11 +561,11 @@ LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
// } else { x = x - 1.0; }
Value mask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, x,
cstCephesSQRTHF);
- Value tmp = builder.create<SelectOp>(mask, x, cstZero);
+ Value tmp = builder.create<arith::SelectOp>(mask, x, cstZero);
x = builder.create<arith::SubFOp>(x, cstOne);
e = builder.create<arith::SubFOp>(
- e, builder.create<SelectOp>(mask, cstOne, cstZero));
+ e, builder.create<arith::SelectOp>(mask, cstOne, cstZero));
x = builder.create<arith::AddFOp>(x, tmp);
Value x2 = builder.create<arith::MulFOp>(x, x);
@@ -605,11 +605,11 @@ LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
// • x == 0 -> -INF
// • x < 0 -> NAN
// • x == +INF -> +INF
- Value aproximation = builder.create<SelectOp>(
+ Value aproximation = builder.create<arith::SelectOp>(
zeroMask, cstMinusInf,
- builder.create<SelectOp>(
+ builder.create<arith::SelectOp>(
invalidMask, cstNan,
- builder.create<SelectOp>(posInfMask, cstPosInf, x)));
+ builder.create<arith::SelectOp>(posInfMask, cstPosInf, x)));
rewriter.replaceOp(op, aproximation);
@@ -683,7 +683,7 @@ Log1pApproximation::matchAndRewrite(math::Log1pOp op,
Value logLarge = builder.create<arith::MulFOp>(
x, builder.create<arith::DivFOp>(
logU, builder.create<arith::SubFOp>(u, cstOne)));
- Value approximation = builder.create<SelectOp>(
+ Value approximation = builder.create<arith::SelectOp>(
builder.create<arith::OrIOp>(uSmall, uInf), x, logLarge);
rewriter.replaceOp(op, approximation);
return success();
@@ -765,7 +765,8 @@ ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
Value isNegativeArg = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT,
op.getOperand(), zero);
Value negArg = builder.create<arith::NegFOp>(op.getOperand());
- Value x = builder.create<SelectOp>(isNegativeArg, negArg, op.getOperand());
+ Value x =
+ builder.create<arith::SelectOp>(isNegativeArg, negArg, op.getOperand());
Value offset = offsets[0];
Value p[polyDegree + 1];
@@ -781,11 +782,13 @@ ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
isLessThanBound[j] =
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, x, bounds[j]);
for (int i = 0; i <= polyDegree; ++i) {
- p[i] = builder.create<SelectOp>(isLessThanBound[j], p[i], pp[j + 1][i]);
- q[i] = builder.create<SelectOp>(isLessThanBound[j], q[i], qq[j + 1][i]);
+ p[i] = builder.create<arith::SelectOp>(isLessThanBound[j], p[i],
+ pp[j + 1][i]);
+ q[i] = builder.create<arith::SelectOp>(isLessThanBound[j], q[i],
+ qq[j + 1][i]);
}
- offset =
- builder.create<SelectOp>(isLessThanBound[j], offset, offsets[j + 1]);
+ offset = builder.create<arith::SelectOp>(isLessThanBound[j], offset,
+ offsets[j + 1]);
}
isLessThanBound[intervalsCount - 1] = builder.create<arith::CmpFOp>(
arith::CmpFPredicate::ULT, x, bounds[intervalsCount - 1]);
@@ -794,12 +797,13 @@ ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
Value qPoly = makePolynomialCalculation(builder, q, x);
Value rationalPoly = builder.create<arith::DivFOp>(pPoly, qPoly);
Value formula = builder.create<arith::AddFOp>(offset, rationalPoly);
- formula = builder.create<SelectOp>(isLessThanBound[intervalsCount - 1],
- formula, one);
+ formula = builder.create<arith::SelectOp>(isLessThanBound[intervalsCount - 1],
+ formula, one);
// erf is odd function: erf(x) = -erf(-x).
Value negFormula = builder.create<arith::NegFOp>(formula);
- Value res = builder.create<SelectOp>(isNegativeArg, negFormula, formula);
+ Value res =
+ builder.create<arith::SelectOp>(isNegativeArg, negFormula, formula);
rewriter.replaceOp(op, res);
@@ -917,14 +921,14 @@ ExpApproximation::matchAndRewrite(math::ExpOp op,
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, x, zerof32Const);
Value isComputable = builder.create<arith::AndIOp>(rightBound, leftBound);
- expY = builder.create<SelectOp>(
+ expY = builder.create<arith::SelectOp>(
isNegInfinityX, zerof32Const,
- builder.create<SelectOp>(
+ builder.create<arith::SelectOp>(
isPosInfinityX, constPosInfinity,
- builder.create<SelectOp>(isComputable, expY,
- builder.create<SelectOp>(isPostiveX,
- constPosInfinity,
- underflow))));
+ builder.create<arith::SelectOp>(
+ isComputable, expY,
+ builder.create<arith::SelectOp>(isPostiveX, constPosInfinity,
+ underflow))));
rewriter.replaceOp(op, expY);
@@ -981,9 +985,10 @@ ExpM1Approximation::matchAndRewrite(math::ExpM1Op op,
// (u - 1) * (x / ~x)
Value expm1 = builder.create<arith::MulFOp>(
uMinusOne, builder.create<arith::DivFOp>(x, logU));
- expm1 = builder.create<SelectOp>(isInf, u, expm1);
- Value approximation = builder.create<SelectOp>(
- uEqOne, x, builder.create<SelectOp>(uMinusOneEqNegOne, cstNegOne, expm1));
+ expm1 = builder.create<arith::SelectOp>(isInf, u, expm1);
+ Value approximation = builder.create<arith::SelectOp>(
+ uEqOne, x,
+ builder.create<arith::SelectOp>(uMinusOneEqNegOne, cstNegOne, expm1));
rewriter.replaceOp(op, approximation);
return success();
}
@@ -1053,7 +1058,7 @@ LogicalResult SinAndCosApproximation<isSine, OpTy>::matchAndRewrite(
};
auto select = [&](Value cond, Value t, Value f) -> Value {
- return builder.create<SelectOp>(cond, t, f);
+ return builder.create<arith::SelectOp>(cond, t, f);
};
auto fmla = [&](Value a, Value b, Value c) {
@@ -1189,7 +1194,8 @@ RsqrtApproximation::matchAndRewrite(math::RsqrtOp op,
// return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
// x is zero or a positive denormalized float (equivalent to flushing positive
// denormalized inputs to zero).
- Value res = builder.create<SelectOp>(notNormalFiniteMask, yApprox, yNewton);
+ Value res =
+ builder.create<arith::SelectOp>(notNormalFiniteMask, yApprox, yNewton);
rewriter.replaceOp(op, res);
return success();
diff --git a/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp
index 293fb58d4e701..2a839771f97f1 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/MemRef/Transforms/ExpandOps.cpp
@@ -67,7 +67,7 @@ struct AtomicRMWOpConverter : public OpRewritePattern<memref::AtomicRMWOp> {
Value lhs = genericOp.getCurrentValue();
Value rhs = op.value();
Value cmp = bodyBuilder.create<arith::CmpFOp>(loc, predicate, lhs, rhs);
- Value select = bodyBuilder.create<SelectOp>(loc, cmp, lhs, rhs);
+ Value select = bodyBuilder.create<arith::SelectOp>(loc, cmp, lhs, rhs);
bodyBuilder.create<memref::AtomicYieldOp>(loc, select);
rewriter.replaceOp(op, genericOp.getResult());
diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp
index 69761a56348a9..28c50ba6382fe 100644
--- a/mlir/lib/Dialect/SCF/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/SCF.cpp
@@ -1306,8 +1306,8 @@ struct ConvertTrivialIfToSelect : public OpRewritePattern<IfOp> {
if (trueVal == falseVal)
results[it.index()] = trueVal;
else
- results[it.index()] =
- rewriter.create<SelectOp>(op.getLoc(), cond, trueVal, falseVal);
+ results[it.index()] = rewriter.create<arith::SelectOp>(
+ op.getLoc(), cond, trueVal, falseVal);
}
rewriter.replaceOp(op, results);
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index 148acd16762d4..83766b18e2ed7 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -808,8 +808,8 @@ static Loops stripmineSink(scf::ForOp forOp, Value factor,
Value stepped = b.create<arith::AddIOp>(t.getLoc(), iv, forOp.getStep());
Value less = b.create<arith::CmpIOp>(t.getLoc(), arith::CmpIPredicate::slt,
forOp.getUpperBound(), stepped);
- Value ub =
- b.create<SelectOp>(t.getLoc(), less, forOp.getUpperBound(), stepped);
+ Value ub = b.create<arith::SelectOp>(t.getLoc(), less,
+ forOp.getUpperBound(), stepped);
// Splice [begin, begin + nOps - 1) into `newForOp` and replace uses.
auto newForOp = b.create<scf::ForOp>(t.getLoc(), iv, ub, originalStep);
diff --git a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
index 32cfeb16a73fa..be755f76e6d2c 100644
--- a/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Pipelines/SparseTensorPipelines.cpp
@@ -36,7 +36,6 @@ void mlir::sparse_tensor::buildSparseCompiler(
pm.addPass(createFuncBufferizePass());
pm.addPass(arith::createConstantBufferizePass());
pm.addPass(createTensorBufferizePass());
- pm.addPass(createStdBufferizePass());
pm.addPass(mlir::bufferization::createFinalizingBufferizePass());
pm.addPass(createLowerAffinePass());
pm.addPass(createConvertVectorToLLVMPass());
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index 9118cfb787e9b..72e70ddbc123e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -787,8 +787,8 @@ static void genTensorStore(Merger &merger, CodeGen &codegen,
// Test if this is a scalarized reduction.
if (codegen.redVal) {
if (codegen.curVecLength > 1)
- rhs = rewriter.create<SelectOp>(loc, codegen.curVecMask, rhs,
- codegen.redVal);
+ rhs = rewriter.create<arith::SelectOp>(loc, codegen.curVecMask, rhs,
+ codegen.redVal);
updateReduc(merger, codegen, rhs);
return;
}
@@ -1276,7 +1276,7 @@ static void genLocals(Merger &merger, CodeGen &codegen,
if (min) {
Value cmp = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::ult, load, min);
- min = rewriter.create<SelectOp>(loc, cmp, load, min);
+ min = rewriter.create<arith::SelectOp>(loc, cmp, load, min);
} else {
min = load;
}
@@ -1363,7 +1363,7 @@ static void genWhileInduction(Merger &merger, CodeGen &codegen,
Value cmp = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
op1, op2);
Value add = rewriter.create<arith::AddIOp>(loc, op3, one);
- operands.push_back(rewriter.create<SelectOp>(loc, cmp, add, op3));
+ operands.push_back(rewriter.create<arith::SelectOp>(loc, cmp, add, op3));
codegen.pidxs[tensor][idx] = whileOp->getResult(o++);
}
}
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 932959517b655..c1e99f58faaf1 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -304,23 +304,6 @@ LogicalResult CallIndirectOp::canonicalize(CallIndirectOp indirectCall,
return success();
}
-//===----------------------------------------------------------------------===//
-// General helpers for comparison ops
-//===----------------------------------------------------------------------===//
-
-// Return the type of the same shape (scalar, vector or tensor) containing i1.
-static Type getI1SameShape(Type type) {
- auto i1Type = IntegerType::get(type.getContext(), 1);
- if (auto tensorType = type.dyn_cast<RankedTensorType>())
- return RankedTensorType::get(tensorType.getShape(), i1Type);
- if (type.isa<UnrankedTensorType>())
- return UnrankedTensorType::get(i1Type);
- if (auto vectorType = type.dyn_cast<VectorType>())
- return VectorType::get(vectorType.getShape(), i1Type,
- vectorType.getNumScalableDims());
- return i1Type;
-}
-
//===----------------------------------------------------------------------===//
// CondBranchOp
//===----------------------------------------------------------------------===//
@@ -390,7 +373,7 @@ struct SimplifyPassThroughCondBranch : public OpRewritePattern<CondBranchOp> {
/// -> br ^bb1(A, ..., N)
///
/// cond_br %cond, ^bb1(A), ^bb1(B)
-/// -> %select = select %cond, A, B
+/// -> %select = arith.select %cond, A, B
/// br ^bb1(%select)
///
struct SimplifyCondBranchIdenticalSuccessors
@@ -426,7 +409,7 @@ struct SimplifyCondBranchIdenticalSuccessors
if (std::get<0>(it) == std::get<1>(it))
mergedOperands.push_back(std::get<0>(it));
else
- mergedOperands.push_back(rewriter.create<SelectOp>(
+ mergedOperands.push_back(rewriter.create<arith::SelectOp>(
condbr.getLoc(), condition, std::get<0>(it), std::get<1>(it)));
}
@@ -697,172 +680,6 @@ LogicalResult ReturnOp::verify() {
return success();
}
-//===----------------------------------------------------------------------===//
-// SelectOp
-//===----------------------------------------------------------------------===//
-
-// Transforms a select of a boolean to arithmetic operations
-//
-// select %arg, %x, %y : i1
-//
-// becomes
-//
-// and(%arg, %x) or and(!%arg, %y)
-struct SelectI1Simplify : public OpRewritePattern<SelectOp> {
- using OpRewritePattern<SelectOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(SelectOp op,
- PatternRewriter &rewriter) const override {
- if (!op.getType().isInteger(1))
- return failure();
-
- Value falseConstant =
- rewriter.create<arith::ConstantIntOp>(op.getLoc(), true, 1);
- Value notCondition = rewriter.create<arith::XOrIOp>(
- op.getLoc(), op.getCondition(), falseConstant);
-
- Value trueVal = rewriter.create<arith::AndIOp>(
- op.getLoc(), op.getCondition(), op.getTrueValue());
- Value falseVal = rewriter.create<arith::AndIOp>(op.getLoc(), notCondition,
- op.getFalseValue());
- rewriter.replaceOpWithNewOp<arith::OrIOp>(op, trueVal, falseVal);
- return success();
- }
-};
-
-// select %arg, %c1, %c0 => extui %arg
-struct SelectToExtUI : public OpRewritePattern<SelectOp> {
- using OpRewritePattern<SelectOp>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(SelectOp op,
- PatternRewriter &rewriter) const override {
- // Cannot extui i1 to i1, or i1 to f32
- if (!op.getType().isa<IntegerType>() || op.getType().isInteger(1))
- return failure();
-
- // select %x, c1, %c0 => extui %arg
- if (matchPattern(op.getTrueValue(), m_One()))
- if (matchPattern(op.getFalseValue(), m_Zero())) {
- rewriter.replaceOpWithNewOp<arith::ExtUIOp>(op, op.getType(),
- op.getCondition());
- return success();
- }
-
- // select %x, c0, %c1 => extui (xor %arg, true)
- if (matchPattern(op.getTrueValue(), m_Zero()))
- if (matchPattern(op.getFalseValue(), m_One())) {
- rewriter.replaceOpWithNewOp<arith::ExtUIOp>(
- op, op.getType(),
- rewriter.create<arith::XOrIOp>(
- op.getLoc(), op.getCondition(),
- rewriter.create<arith::ConstantIntOp>(
- op.getLoc(), 1, op.getCondition().getType())));
- return success();
- }
-
- return failure();
- }
-};
-
-void SelectOp::getCanonicalizationPatterns(RewritePatternSet &results,
- MLIRContext *context) {
- results.insert<SelectI1Simplify, SelectToExtUI>(context);
-}
-
-OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
- auto trueVal = getTrueValue();
- auto falseVal = getFalseValue();
- if (trueVal == falseVal)
- return trueVal;
-
- auto condition = getCondition();
-
- // select true, %0, %1 => %0
- if (matchPattern(condition, m_One()))
- return trueVal;
-
- // select false, %0, %1 => %1
- if (matchPattern(condition, m_Zero()))
- return falseVal;
-
- // select %x, true, false => %x
- if (getType().isInteger(1))
- if (matchPattern(getTrueValue(), m_One()))
- if (matchPattern(getFalseValue(), m_Zero()))
- return condition;
-
- if (auto cmp = dyn_cast_or_null<arith::CmpIOp>(condition.getDefiningOp())) {
- auto pred = cmp.getPredicate();
- if (pred == arith::CmpIPredicate::eq || pred == arith::CmpIPredicate::ne) {
- auto cmpLhs = cmp.getLhs();
- auto cmpRhs = cmp.getRhs();
-
- // %0 = arith.cmpi eq, %arg0, %arg1
- // %1 = select %0, %arg0, %arg1 => %arg1
-
- // %0 = arith.cmpi ne, %arg0, %arg1
- // %1 = select %0, %arg0, %arg1 => %arg0
-
- if ((cmpLhs == trueVal && cmpRhs == falseVal) ||
- (cmpRhs == trueVal && cmpLhs == falseVal))
- return pred == arith::CmpIPredicate::ne ? trueVal : falseVal;
- }
- }
- return nullptr;
-}
-
-static void print(OpAsmPrinter &p, SelectOp op) {
- p << " " << op.getOperands();
- p.printOptionalAttrDict(op->getAttrs());
- p << " : ";
- if (ShapedType condType = op.getCondition().getType().dyn_cast<ShapedType>())
- p << condType << ", ";
- p << op.getType();
-}
-
-static ParseResult parseSelectOp(OpAsmParser &parser, OperationState &result) {
- Type conditionType, resultType;
- SmallVector<OpAsmParser::OperandType, 3> operands;
- if (parser.parseOperandList(operands, /*requiredOperandCount=*/3) ||
- parser.parseOptionalAttrDict(result.attributes) ||
- parser.parseColonType(resultType))
- return failure();
-
- // Check for the explicit condition type if this is a masked tensor or vector.
- if (succeeded(parser.parseOptionalComma())) {
- conditionType = resultType;
- if (parser.parseType(resultType))
- return failure();
- } else {
- conditionType = parser.getBuilder().getI1Type();
- }
-
- result.addTypes(resultType);
- return parser.resolveOperands(operands,
- {conditionType, resultType, resultType},
- parser.getNameLoc(), result.operands);
-}
-
-LogicalResult SelectOp::verify() {
- Type conditionType = getCondition().getType();
- if (conditionType.isSignlessInteger(1))
- return success();
-
- // If the result type is a vector or tensor, the type can be a mask with the
- // same elements.
- Type resultType = getType();
- if (!resultType.isa<TensorType, VectorType>())
- return emitOpError() << "expected condition to be a signless i1, but got "
- << conditionType;
- Type shapedConditionType = getI1SameShape(resultType);
- if (conditionType != shapedConditionType)
- return emitOpError() << "expected condition type to have the same shape "
- "as the result type, expected "
- << shapedConditionType << ", but got "
- << conditionType;
- return success();
-}
-
//===----------------------------------------------------------------------===//
// SwitchOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.cpp
deleted file mode 100644
index b89a5372a48b6..0000000000000
--- a/mlir/lib/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h"
-
-#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/IR/Dialect.h"
-#include "mlir/IR/Operation.h"
-
-using namespace mlir;
-using namespace mlir::bufferization;
-
-namespace mlir {
-namespace {
-
-/// Bufferization of std.select. Just replace the operands.
-struct SelectOpInterface
- : public BufferizableOpInterface::ExternalModel<SelectOpInterface,
- SelectOp> {
- bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
- const BufferizationState &state) const {
- return false;
- }
-
- bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
- const BufferizationState &state) const {
- return false;
- }
-
- OpResult getAliasingOpResult(Operation *op, OpOperand &opOperand,
- const BufferizationState &state) const {
- return op->getOpResult(0) /*result*/;
- }
-
- SmallVector<OpOperand *>
- getAliasingOpOperand(Operation *op, OpResult opResult,
- const BufferizationState &state) const {
- return {&op->getOpOperand(1) /*true_value*/,
- &op->getOpOperand(2) /*false_value*/};
- }
-
- LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
- const BufferizationState &state) const {
- auto selectOp = cast<SelectOp>(op);
- // `getBuffer` introduces copies if an OpOperand bufferizes out-of-place.
- // TODO: It would be more efficient to copy the result of the `select` op
- // instead of its OpOperands. In the worst case, 2 copies are inserted at
- // the moment (one for each tensor). When copying the op result, only one
- // copy would be needed.
- Value trueBuffer =
- *state.getBuffer(rewriter, selectOp->getOpOperand(1) /*true_value*/);
- Value falseBuffer =
- *state.getBuffer(rewriter, selectOp->getOpOperand(2) /*false_value*/);
- replaceOpWithNewBufferizedOp<SelectOp>(
- rewriter, op, selectOp.getCondition(), trueBuffer, falseBuffer);
- return success();
- }
-
- BufferRelation bufferRelation(Operation *op, OpResult opResult,
- const BufferizationState &state) const {
- return BufferRelation::None;
- }
-};
-
-} // namespace
-} // namespace mlir
-
-void mlir::registerBufferizableOpInterfaceExternalModels(
- DialectRegistry &registry) {
- registry.addOpInterface<SelectOp, SelectOpInterface>();
-}
diff --git a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp b/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
deleted file mode 100644
index 64f9d040a71ca..0000000000000
--- a/mlir/lib/Dialect/StandardOps/Transforms/Bufferize.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- Bufferize.cpp - Bufferization for std ops --------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements bufferization of std ops.
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
-#include "PassDetail.h"
-#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
-#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
-#include "mlir/Dialect/MemRef/IR/MemRef.h"
-#include "mlir/Dialect/SCF/SCF.h"
-#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h"
-#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
-#include "mlir/Dialect/Tensor/IR/Tensor.h"
-
-using namespace mlir;
-using namespace mlir::bufferization;
-
-namespace {
-struct StdBufferizePass : public StdBufferizeBase<StdBufferizePass> {
- void runOnOperation() override {
- std::unique_ptr<BufferizationOptions> options =
- getPartialBufferizationOptions();
- options->addToDialectFilter<StandardOpsDialect>();
-
- if (failed(bufferizeOp(getOperation(), *options)))
- signalPassFailure();
- }
-
- void getDependentDialects(DialectRegistry &registry) const override {
- registry.insert<bufferization::BufferizationDialect, memref::MemRefDialect,
- StandardOpsDialect, scf::SCFDialect>();
- mlir::registerBufferizableOpInterfaceExternalModels(registry);
- }
-};
-} // namespace
-
-std::unique_ptr<Pass> mlir::createStdBufferizePass() {
- return std::make_unique<StdBufferizePass>();
-}
diff --git a/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt b/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt
index 7db425fdc361d..3ee725bc0489d 100644
--- a/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt
@@ -1,6 +1,4 @@
add_mlir_dialect_library(MLIRStandardOpsTransforms
- BufferizableOpInterfaceImpl.cpp
- Bufferize.cpp
DecomposeCallGraphTypes.cpp
FuncBufferize.cpp
FuncConversions.cpp
diff --git a/mlir/lib/Dialect/StandardOps/Utils/Utils.cpp b/mlir/lib/Dialect/StandardOps/Utils/Utils.cpp
index dc65dd6d6db5b..7b1fbe9127052 100644
--- a/mlir/lib/Dialect/StandardOps/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/StandardOps/Utils/Utils.cpp
@@ -93,5 +93,5 @@ Value ArithBuilder::slt(Value lhs, Value rhs) {
return b.create<arith::CmpFOp>(loc, arith::CmpFPredicate::OLT, lhs, rhs);
}
Value ArithBuilder::select(Value cmp, Value lhs, Value rhs) {
- return b.create<SelectOp>(loc, cmp, lhs, rhs);
+ return b.create<arith::SelectOp>(loc, cmp, lhs, rhs);
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 4ff9d65ad6696..e5617d09c8e7f 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -698,7 +698,7 @@ class CreateMaskOpLowering : public OpRewritePattern<vector::CreateMaskOp> {
rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(d));
Value val = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
bnd, idx);
- Value sel = rewriter.create<SelectOp>(loc, val, trueVal, falseVal);
+ Value sel = rewriter.create<arith::SelectOp>(loc, val, trueVal, falseVal);
auto pos = rewriter.getI64ArrayAttr(d);
result =
rewriter.create<vector::InsertOp>(loc, dstType, sel, result, pos);
diff --git a/mlir/test/Analysis/test-match-reduction.mlir b/mlir/test/Analysis/test-match-reduction.mlir
index dcfd4b24ed167..a3b5e1a70af18 100644
--- a/mlir/test/Analysis/test-match-reduction.mlir
+++ b/mlir/test/Analysis/test-match-reduction.mlir
@@ -52,7 +52,7 @@ func @linalg_red_max(%in0t: tensor<4x4xf32>, %out0t: tensor<4xf32>) {
outs(%out0t : tensor<4xf32>) {
^bb0(%in0: f32, %out0: f32):
%cmp = arith.cmpf ogt, %in0, %out0 : f32
- %sel = select %cmp, %in0, %out0 : f32
+ %sel = arith.select %cmp, %in0, %out0 : f32
linalg.yield %sel : f32
} -> tensor<4xf32>
return
diff --git a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
index d4e3c03694644..c1fcf864034eb 100644
--- a/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
+++ b/mlir/test/Conversion/AffineToStandard/lower-affine.mlir
@@ -374,11 +374,11 @@ func @if_for() {
// CHECK-NEXT: %[[a:.*]] = arith.muli %{{.*}}, %[[cm1]] : index
// CHECK-NEXT: %[[b:.*]] = arith.addi %[[a]], %{{.*}} : index
// CHECK-NEXT: %[[c:.*]] = arith.cmpi sgt, %{{.*}}, %[[b]] : index
-// CHECK-NEXT: %[[d:.*]] = select %[[c]], %{{.*}}, %[[b]] : index
+// CHECK-NEXT: %[[d:.*]] = arith.select %[[c]], %{{.*}}, %[[b]] : index
// CHECK-NEXT: %[[c10:.*]] = arith.constant 10 : index
// CHECK-NEXT: %[[e:.*]] = arith.addi %{{.*}}, %[[c10]] : index
// CHECK-NEXT: %[[f:.*]] = arith.cmpi slt, %{{.*}}, %[[e]] : index
-// CHECK-NEXT: %[[g:.*]] = select %[[f]], %{{.*}}, %[[e]] : index
+// CHECK-NEXT: %[[g:.*]] = arith.select %[[f]], %{{.*}}, %[[e]] : index
// CHECK-NEXT: %[[c1_0:.*]] = arith.constant 1 : index
// CHECK-NEXT: for %{{.*}} = %[[d]] to %[[g]] step %[[c1_0]] {
// CHECK-NEXT: call @body2(%{{.*}}, %{{.*}}) : (index, index) -> ()
@@ -403,17 +403,17 @@ func @loop_min_max(%N : index) {
// CHECK-LABEL: func @min_reduction_tree
// CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
// CHECK-NEXT: %[[c01:.+]] = arith.cmpi slt, %{{.*}}, %{{.*}} : index
-// CHECK-NEXT: %[[r01:.+]] = select %[[c01]], %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %[[r01:.+]] = arith.select %[[c01]], %{{.*}}, %{{.*}} : index
// CHECK-NEXT: %[[c012:.+]] = arith.cmpi slt, %[[r01]], %{{.*}} : index
-// CHECK-NEXT: %[[r012:.+]] = select %[[c012]], %[[r01]], %{{.*}} : index
+// CHECK-NEXT: %[[r012:.+]] = arith.select %[[c012]], %[[r01]], %{{.*}} : index
// CHECK-NEXT: %[[c0123:.+]] = arith.cmpi slt, %[[r012]], %{{.*}} : index
-// CHECK-NEXT: %[[r0123:.+]] = select %[[c0123]], %[[r012]], %{{.*}} : index
+// CHECK-NEXT: %[[r0123:.+]] = arith.select %[[c0123]], %[[r012]], %{{.*}} : index
// CHECK-NEXT: %[[c01234:.+]] = arith.cmpi slt, %[[r0123]], %{{.*}} : index
-// CHECK-NEXT: %[[r01234:.+]] = select %[[c01234]], %[[r0123]], %{{.*}} : index
+// CHECK-NEXT: %[[r01234:.+]] = arith.select %[[c01234]], %[[r0123]], %{{.*}} : index
// CHECK-NEXT: %[[c012345:.+]] = arith.cmpi slt, %[[r01234]], %{{.*}} : index
-// CHECK-NEXT: %[[r012345:.+]] = select %[[c012345]], %[[r01234]], %{{.*}} : index
+// CHECK-NEXT: %[[r012345:.+]] = arith.select %[[c012345]], %[[r01234]], %{{.*}} : index
// CHECK-NEXT: %[[c0123456:.+]] = arith.cmpi slt, %[[r012345]], %{{.*}} : index
-// CHECK-NEXT: %[[r0123456:.+]] = select %[[c0123456]], %[[r012345]], %{{.*}} : index
+// CHECK-NEXT: %[[r0123456:.+]] = arith.select %[[c0123456]], %[[r012345]], %{{.*}} : index
// CHECK-NEXT: %[[c1:.*]] = arith.constant 1 : index
// CHECK-NEXT: for %{{.*}} = %[[c0]] to %[[r0123456]] step %[[c1]] {
// CHECK-NEXT: call @body(%{{.*}}) : (index) -> ()
@@ -507,7 +507,7 @@ func @affine_apply_mod(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[c0:.*]] = arith.constant 0 : index
// CHECK-NEXT: %[[v1:.*]] = arith.cmpi slt, %[[v0]], %[[c0]] : index
// CHECK-NEXT: %[[v2:.*]] = arith.addi %[[v0]], %[[c42]] : index
-// CHECK-NEXT: %[[v3:.*]] = select %[[v1]], %[[v2]], %[[v0]] : index
+// CHECK-NEXT: %[[v3:.*]] = arith.select %[[v1]], %[[v2]], %[[v0]] : index
%0 = affine.apply #mapmod (%arg0)
return %0 : index
}
@@ -526,10 +526,10 @@ func @affine_apply_floordiv(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[cm1:.*]] = arith.constant -1 : index
// CHECK-NEXT: %[[v0:.*]] = arith.cmpi slt, %{{.*}}, %[[c0]] : index
// CHECK-NEXT: %[[v1:.*]] = arith.subi %[[cm1]], %{{.*}} : index
-// CHECK-NEXT: %[[v2:.*]] = select %[[v0]], %[[v1]], %{{.*}} : index
+// CHECK-NEXT: %[[v2:.*]] = arith.select %[[v0]], %[[v1]], %{{.*}} : index
// CHECK-NEXT: %[[v3:.*]] = arith.divsi %[[v2]], %[[c42]] : index
// CHECK-NEXT: %[[v4:.*]] = arith.subi %[[cm1]], %[[v3]] : index
-// CHECK-NEXT: %[[v5:.*]] = select %[[v0]], %[[v4]], %[[v3]] : index
+// CHECK-NEXT: %[[v5:.*]] = arith.select %[[v0]], %[[v4]], %[[v3]] : index
%0 = affine.apply #mapfloordiv (%arg0)
return %0 : index
}
@@ -549,11 +549,11 @@ func @affine_apply_ceildiv(%arg0 : index) -> (index) {
// CHECK-NEXT: %[[v0:.*]] = arith.cmpi sle, %{{.*}}, %[[c0]] : index
// CHECK-NEXT: %[[v1:.*]] = arith.subi %[[c0]], %{{.*}} : index
// CHECK-NEXT: %[[v2:.*]] = arith.subi %{{.*}}, %[[c1]] : index
-// CHECK-NEXT: %[[v3:.*]] = select %[[v0]], %[[v1]], %[[v2]] : index
+// CHECK-NEXT: %[[v3:.*]] = arith.select %[[v0]], %[[v1]], %[[v2]] : index
// CHECK-NEXT: %[[v4:.*]] = arith.divsi %[[v3]], %[[c42]] : index
// CHECK-NEXT: %[[v5:.*]] = arith.subi %[[c0]], %[[v4]] : index
// CHECK-NEXT: %[[v6:.*]] = arith.addi %[[v4]], %[[c1]] : index
-// CHECK-NEXT: %[[v7:.*]] = select %[[v0]], %[[v5]], %[[v6]] : index
+// CHECK-NEXT: %[[v7:.*]] = arith.select %[[v0]], %[[v5]], %[[v6]] : index
%0 = affine.apply #mapceildiv (%arg0)
return %0 : index
}
@@ -652,7 +652,7 @@ func @affine_min(%arg0: index, %arg1: index) -> index{
// CHECK: %[[neg2:.*]] = arith.muli %[[ARG0]], %[[Cm2:.*]]
// CHECK: %[[second:.*]] = arith.addi %[[ARG1]], %[[neg2]]
// CHECK: %[[cmp:.*]] = arith.cmpi slt, %[[first]], %[[second]]
- // CHECK: select %[[cmp]], %[[first]], %[[second]]
+ // CHECK: arith.select %[[cmp]], %[[first]], %[[second]]
%0 = affine.min affine_map<(d0,d1) -> (d0 - d1, d1 - d0)>(%arg0, %arg1)
return %0 : index
}
@@ -667,7 +667,7 @@ func @affine_max(%arg0: index, %arg1: index) -> index{
// CHECK: %[[neg2:.*]] = arith.muli %[[ARG0]], %[[Cm2:.*]]
// CHECK: %[[second:.*]] = arith.addi %[[ARG1]], %[[neg2]]
// CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[first]], %[[second]]
- // CHECK: select %[[cmp]], %[[first]], %[[second]]
+ // CHECK: arith.select %[[cmp]], %[[first]], %[[second]]
%0 = affine.max affine_map<(d0,d1) -> (d0 - d1, d1 - d0)>(%arg0, %arg1)
return %0 : index
}
diff --git a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
index ecc173dc44f0b..14c0c3bd451ab 100644
--- a/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
+++ b/mlir/test/Conversion/ArithmeticToLLVM/arith-to-llvm.mlir
@@ -374,3 +374,12 @@ func @cmpi_2dvector(%arg0 : vector<4x3xi32>, %arg1 : vector<4x3xi32>) {
%0 = arith.cmpi ult, %arg0, %arg1 : vector<4x3xi32>
std.return
}
+
+// -----
+
+// CHECK-LABEL: @select
+func @select(%arg0 : i1, %arg1 : i32, %arg2 : i32) -> i32 {
+ // CHECK: = llvm.select %arg0, %arg1, %arg2 : i1, i32
+ %0 = arith.select %arg0, %arg1, %arg2 : i32
+ return %0 : i32
+}
diff --git a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
index 38274eebe5bad..1d40ba6f8a572 100644
--- a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
+++ b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir
@@ -194,3 +194,19 @@ func @bitcast_2d(%arg0: vector<2x4xf32>) {
arith.bitcast %arg0 : vector<2x4xf32> to vector<2x4xi32>
return
}
+
+// -----
+
+// CHECK-LABEL: func @select_2d(
+func @select_2d(%arg0 : vector<4x3xi1>, %arg1 : vector<4x3xi32>, %arg2 : vector<4x3xi32>) {
+ // CHECK: %[[ARG0:.*]] = builtin.unrealized_conversion_cast %arg0
+ // CHECK: %[[ARG1:.*]] = builtin.unrealized_conversion_cast %arg1
+ // CHECK: %[[ARG2:.*]] = builtin.unrealized_conversion_cast %arg2
+ // CHECK: %[[EXTRACT1:.*]] = llvm.extractvalue %[[ARG0]][0] : !llvm.array<4 x vector<3xi1>>
+ // CHECK: %[[EXTRACT2:.*]] = llvm.extractvalue %[[ARG1]][0] : !llvm.array<4 x vector<3xi32>>
+ // CHECK: %[[EXTRACT3:.*]] = llvm.extractvalue %[[ARG2]][0] : !llvm.array<4 x vector<3xi32>>
+ // CHECK: %[[SELECT:.*]] = llvm.select %[[EXTRACT1]], %[[EXTRACT2]], %[[EXTRACT3]] : vector<3xi1>, vector<3xi32>
+ // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[SELECT]], %{{.*}}[0] : !llvm.array<4 x vector<3xi32>>
+ %0 = arith.select %arg0, %arg1, %arg2 : vector<4x3xi1>, vector<4x3xi32>
+ std.return
+}
diff --git a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
index 18b7680e8e1b8..9eeddf2edb0c3 100644
--- a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
@@ -892,3 +892,21 @@ func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
}
} // end module
+
+// -----
+
+module attributes {
+ spv.target_env = #spv.target_env<
+ #spv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64],
+ [SPV_KHR_storage_buffer_storage_class]>, {}>
+} {
+
+// CHECK-LABEL: @select
+func @select(%arg0 : i32, %arg1 : i32) {
+ %0 = arith.cmpi sle, %arg0, %arg1 : i32
+ // CHECK: spv.Select
+ %1 = arith.select %0, %arg0, %arg1 : i32
+ return
+}
+
+} // end module
diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
index 80e90229e08ff..e43f182b2047d 100644
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -87,9 +87,9 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[LHS_IS_INFINITE:.*]] = arith.ori %[[LHS_REAL_INFINITE]], %[[LHS_IMAG_INFINITE]] : i1
// CHECK: %[[INF_NUM_FINITE_DENOM:.*]] = arith.andi %[[LHS_IS_INFINITE]], %[[RHS_IS_FINITE]] : i1
// CHECK: %[[ONE:.*]] = arith.constant 1.000000e+00 : f32
-// CHECK: %[[LHS_REAL_IS_INF:.*]] = select %[[LHS_REAL_INFINITE]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[LHS_REAL_IS_INF:.*]] = arith.select %[[LHS_REAL_INFINITE]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[LHS_REAL_IS_INF_WITH_SIGN:.*]] = math.copysign %[[LHS_REAL_IS_INF]], %[[LHS_REAL]] : f32
-// CHECK: %[[LHS_IMAG_IS_INF:.*]] = select %[[LHS_IMAG_INFINITE]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[LHS_IMAG_IS_INF:.*]] = arith.select %[[LHS_IMAG_INFINITE]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[LHS_IMAG_IS_INF_WITH_SIGN:.*]] = math.copysign %[[LHS_IMAG_IS_INF]], %[[LHS_IMAG]] : f32
// CHECK: %[[LHS_REAL_IS_INF_WITH_SIGN_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_REAL_IS_INF_WITH_SIGN]], %[[RHS_REAL]] : f32
// CHECK: %[[LHS_IMAG_IS_INF_WITH_SIGN_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_IMAG_IS_INF_WITH_SIGN]], %[[RHS_IMAG]] : f32
@@ -108,9 +108,9 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[RHS_IMAG_INFINITE:.*]] = arith.cmpf oeq, %[[RHS_IMAG_ABS]], %[[INF]] : f32
// CHECK: %[[RHS_IS_INFINITE:.*]] = arith.ori %[[RHS_REAL_INFINITE]], %[[RHS_IMAG_INFINITE]] : i1
// CHECK: %[[FINITE_NUM_INFINITE_DENOM:.*]] = arith.andi %[[LHS_IS_FINITE]], %[[RHS_IS_INFINITE]] : i1
-// CHECK: %[[RHS_REAL_IS_INF:.*]] = select %[[RHS_REAL_INFINITE]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[RHS_REAL_IS_INF:.*]] = arith.select %[[RHS_REAL_INFINITE]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[RHS_REAL_IS_INF_WITH_SIGN:.*]] = math.copysign %[[RHS_REAL_IS_INF]], %[[RHS_REAL]] : f32
-// CHECK: %[[RHS_IMAG_IS_INF:.*]] = select %[[RHS_IMAG_INFINITE]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[RHS_IMAG_IS_INF:.*]] = arith.select %[[RHS_IMAG_INFINITE]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[RHS_IMAG_IS_INF_WITH_SIGN:.*]] = math.copysign %[[RHS_IMAG_IS_INF]], %[[RHS_IMAG]] : f32
// CHECK: %[[RHS_REAL_IS_INF_WITH_SIGN_TIMES_LHS_REAL:.*]] = arith.mulf %[[LHS_REAL]], %[[RHS_REAL_IS_INF_WITH_SIGN]] : f32
// CHECK: %[[RHS_IMAG_IS_INF_WITH_SIGN_TIMES_LHS_IMAG:.*]] = arith.mulf %[[LHS_IMAG]], %[[RHS_IMAG_IS_INF_WITH_SIGN]] : f32
@@ -122,19 +122,19 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[RESULT_IMAG_4:.*]] = arith.mulf %[[ZERO]], %[[ZERO_MULTIPLICATOR_2]] : f32
// CHECK: %[[REAL_ABS_SMALLER_THAN_IMAG_ABS:.*]] = arith.cmpf olt, %[[RHS_REAL_ABS]], %[[RHS_IMAG_ABS]] : f32
-// CHECK: %[[RESULT_REAL:.*]] = select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_REAL_1]], %[[RESULT_REAL_2]] : f32
-// CHECK: %[[RESULT_IMAG:.*]] = select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_IMAG_1]], %[[RESULT_IMAG_2]] : f32
-// CHECK: %[[RESULT_REAL_SPECIAL_CASE_3:.*]] = select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_REAL_4]], %[[RESULT_REAL]] : f32
-// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_3:.*]] = select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_IMAG_4]], %[[RESULT_IMAG]] : f32
-// CHECK: %[[RESULT_REAL_SPECIAL_CASE_2:.*]] = select %[[INF_NUM_FINITE_DENOM]], %[[RESULT_REAL_3]], %[[RESULT_REAL_SPECIAL_CASE_3]] : f32
-// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_2:.*]] = select %[[INF_NUM_FINITE_DENOM]], %[[RESULT_IMAG_3]], %[[RESULT_IMAG_SPECIAL_CASE_3]] : f32
-// CHECK: %[[RESULT_REAL_SPECIAL_CASE_1:.*]] = select %[[RESULT_IS_INFINITY]], %[[INFINITY_RESULT_REAL]], %[[RESULT_REAL_SPECIAL_CASE_2]] : f32
-// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_1:.*]] = select %[[RESULT_IS_INFINITY]], %[[INFINITY_RESULT_IMAG]], %[[RESULT_IMAG_SPECIAL_CASE_2]] : f32
+// CHECK: %[[RESULT_REAL:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_REAL_1]], %[[RESULT_REAL_2]] : f32
+// CHECK: %[[RESULT_IMAG:.*]] = arith.select %[[REAL_ABS_SMALLER_THAN_IMAG_ABS]], %[[RESULT_IMAG_1]], %[[RESULT_IMAG_2]] : f32
+// CHECK: %[[RESULT_REAL_SPECIAL_CASE_3:.*]] = arith.select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_REAL_4]], %[[RESULT_REAL]] : f32
+// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_3:.*]] = arith.select %[[FINITE_NUM_INFINITE_DENOM]], %[[RESULT_IMAG_4]], %[[RESULT_IMAG]] : f32
+// CHECK: %[[RESULT_REAL_SPECIAL_CASE_2:.*]] = arith.select %[[INF_NUM_FINITE_DENOM]], %[[RESULT_REAL_3]], %[[RESULT_REAL_SPECIAL_CASE_3]] : f32
+// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_2:.*]] = arith.select %[[INF_NUM_FINITE_DENOM]], %[[RESULT_IMAG_3]], %[[RESULT_IMAG_SPECIAL_CASE_3]] : f32
+// CHECK: %[[RESULT_REAL_SPECIAL_CASE_1:.*]] = arith.select %[[RESULT_IS_INFINITY]], %[[INFINITY_RESULT_REAL]], %[[RESULT_REAL_SPECIAL_CASE_2]] : f32
+// CHECK: %[[RESULT_IMAG_SPECIAL_CASE_1:.*]] = arith.select %[[RESULT_IS_INFINITY]], %[[INFINITY_RESULT_IMAG]], %[[RESULT_IMAG_SPECIAL_CASE_2]] : f32
// CHECK: %[[RESULT_REAL_IS_NAN:.*]] = arith.cmpf uno, %[[RESULT_REAL]], %[[ZERO]] : f32
// CHECK: %[[RESULT_IMAG_IS_NAN:.*]] = arith.cmpf uno, %[[RESULT_IMAG]], %[[ZERO]] : f32
// CHECK: %[[RESULT_IS_NAN:.*]] = arith.andi %[[RESULT_REAL_IS_NAN]], %[[RESULT_IMAG_IS_NAN]] : i1
-// CHECK: %[[RESULT_REAL_WITH_SPECIAL_CASES:.*]] = select %[[RESULT_IS_NAN]], %[[RESULT_REAL_SPECIAL_CASE_1]], %[[RESULT_REAL]] : f32
-// CHECK: %[[RESULT_IMAG_WITH_SPECIAL_CASES:.*]] = select %[[RESULT_IS_NAN]], %[[RESULT_IMAG_SPECIAL_CASE_1]], %[[RESULT_IMAG]] : f32
+// CHECK: %[[RESULT_REAL_WITH_SPECIAL_CASES:.*]] = arith.select %[[RESULT_IS_NAN]], %[[RESULT_REAL_SPECIAL_CASE_1]], %[[RESULT_REAL]] : f32
+// CHECK: %[[RESULT_IMAG_WITH_SPECIAL_CASES:.*]] = arith.select %[[RESULT_IS_NAN]], %[[RESULT_IMAG_SPECIAL_CASE_1]], %[[RESULT_IMAG]] : f32
// CHECK: %[[RESULT:.*]] = complex.create %[[RESULT_REAL_WITH_SPECIAL_CASES]], %[[RESULT_IMAG_WITH_SPECIAL_CASES]] : complex<f32>
// CHECK: return %[[RESULT]] : complex<f32>
@@ -253,18 +253,18 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[RHS_IMAG_IS_NAN:.*]] = arith.cmpf uno, %[[RHS_IMAG]], %[[RHS_IMAG]] : f32
// CHECK: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[ONE:.*]] = arith.constant 1.000000e+00 : f32
-// CHECK: %[[LHS_REAL_IS_INF_FLOAT:.*]] = select %[[LHS_REAL_IS_INF]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[LHS_REAL_IS_INF_FLOAT:.*]] = arith.select %[[LHS_REAL_IS_INF]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[TMP:.*]] = math.copysign %[[LHS_REAL_IS_INF_FLOAT]], %[[LHS_REAL]] : f32
-// CHECK: %[[LHS_REAL1:.*]] = select %[[LHS_IS_INF]], %[[TMP]], %[[LHS_REAL]] : f32
-// CHECK: %[[LHS_IMAG_IS_INF_FLOAT:.*]] = select %[[LHS_IMAG_IS_INF]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[LHS_REAL1:.*]] = arith.select %[[LHS_IS_INF]], %[[TMP]], %[[LHS_REAL]] : f32
+// CHECK: %[[LHS_IMAG_IS_INF_FLOAT:.*]] = arith.select %[[LHS_IMAG_IS_INF]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[TMP:.*]] = math.copysign %[[LHS_IMAG_IS_INF_FLOAT]], %[[LHS_IMAG]] : f32
-// CHECK: %[[LHS_IMAG1:.*]] = select %[[LHS_IS_INF]], %[[TMP]], %[[LHS_IMAG]] : f32
+// CHECK: %[[LHS_IMAG1:.*]] = arith.select %[[LHS_IS_INF]], %[[TMP]], %[[LHS_IMAG]] : f32
// CHECK: %[[LHS_IS_INF_AND_RHS_REAL_IS_NAN:.*]] = arith.andi %[[LHS_IS_INF]], %[[RHS_REAL_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[RHS_REAL]] : f32
-// CHECK: %[[RHS_REAL1:.*]] = select %[[LHS_IS_INF_AND_RHS_REAL_IS_NAN]], %[[TMP]], %[[RHS_REAL]] : f32
+// CHECK: %[[RHS_REAL1:.*]] = arith.select %[[LHS_IS_INF_AND_RHS_REAL_IS_NAN]], %[[TMP]], %[[RHS_REAL]] : f32
// CHECK: %[[LHS_IS_INF_AND_RHS_IMAG_IS_NAN:.*]] = arith.andi %[[LHS_IS_INF]], %[[RHS_IMAG_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[RHS_IMAG]] : f32
-// CHECK: %[[RHS_IMAG1:.*]] = select %[[LHS_IS_INF_AND_RHS_IMAG_IS_NAN]], %[[TMP]], %[[RHS_IMAG]] : f32
+// CHECK: %[[RHS_IMAG1:.*]] = arith.select %[[LHS_IS_INF_AND_RHS_IMAG_IS_NAN]], %[[TMP]], %[[RHS_IMAG]] : f32
// Case 2. RHS_REAL or RHS_IMAG are infinite.
// CHECK: %[[RHS_REAL_IS_INF:.*]] = arith.cmpf oeq, %[[RHS_REAL_ABS]], %[[INF]] : f32
@@ -272,18 +272,18 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[RHS_IS_INF:.*]] = arith.ori %[[RHS_REAL_IS_INF]], %[[RHS_IMAG_IS_INF]] : i1
// CHECK: %[[LHS_REAL_IS_NAN:.*]] = arith.cmpf uno, %[[LHS_REAL1]], %[[LHS_REAL1]] : f32
// CHECK: %[[LHS_IMAG_IS_NAN:.*]] = arith.cmpf uno, %[[LHS_IMAG1]], %[[LHS_IMAG1]] : f32
-// CHECK: %[[RHS_REAL_IS_INF_FLOAT:.*]] = select %[[RHS_REAL_IS_INF]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[RHS_REAL_IS_INF_FLOAT:.*]] = arith.select %[[RHS_REAL_IS_INF]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[TMP:.*]] = math.copysign %[[RHS_REAL_IS_INF_FLOAT]], %[[RHS_REAL1]] : f32
-// CHECK: %[[RHS_REAL2:.*]] = select %[[RHS_IS_INF]], %[[TMP]], %[[RHS_REAL1]] : f32
-// CHECK: %[[RHS_IMAG_IS_INF_FLOAT:.*]] = select %[[RHS_IMAG_IS_INF]], %[[ONE]], %[[ZERO]] : f32
+// CHECK: %[[RHS_REAL2:.*]] = arith.select %[[RHS_IS_INF]], %[[TMP]], %[[RHS_REAL1]] : f32
+// CHECK: %[[RHS_IMAG_IS_INF_FLOAT:.*]] = arith.select %[[RHS_IMAG_IS_INF]], %[[ONE]], %[[ZERO]] : f32
// CHECK: %[[TMP:.*]] = math.copysign %[[RHS_IMAG_IS_INF_FLOAT]], %[[RHS_IMAG1]] : f32
-// CHECK: %[[RHS_IMAG2:.*]] = select %[[RHS_IS_INF]], %[[TMP]], %[[RHS_IMAG1]] : f32
+// CHECK: %[[RHS_IMAG2:.*]] = arith.select %[[RHS_IS_INF]], %[[TMP]], %[[RHS_IMAG1]] : f32
// CHECK: %[[RHS_IS_INF_AND_LHS_REAL_IS_NAN:.*]] = arith.andi %[[RHS_IS_INF]], %[[LHS_REAL_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[LHS_REAL1]] : f32
-// CHECK: %[[LHS_REAL2:.*]] = select %[[RHS_IS_INF_AND_LHS_REAL_IS_NAN]], %[[TMP]], %[[LHS_REAL1]] : f32
+// CHECK: %[[LHS_REAL2:.*]] = arith.select %[[RHS_IS_INF_AND_LHS_REAL_IS_NAN]], %[[TMP]], %[[LHS_REAL1]] : f32
// CHECK: %[[RHS_IS_INF_AND_LHS_IMAG_IS_NAN:.*]] = arith.andi %[[RHS_IS_INF]], %[[LHS_IMAG_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[LHS_IMAG1]] : f32
-// CHECK: %[[LHS_IMAG2:.*]] = select %[[RHS_IS_INF_AND_LHS_IMAG_IS_NAN]], %[[TMP]], %[[LHS_IMAG1]] : f32
+// CHECK: %[[LHS_IMAG2:.*]] = arith.select %[[RHS_IS_INF_AND_LHS_IMAG_IS_NAN]], %[[TMP]], %[[LHS_IMAG1]] : f32
// CHECK: %[[RECALC:.*]] = arith.ori %[[LHS_IS_INF]], %[[RHS_IS_INF]] : i1
// Case 3. One of the pairwise products of left hand side with right hand side
@@ -300,16 +300,16 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[IS_SPECIAL_CASE3:.*]] = arith.andi %[[IS_SPECIAL_CASE2]], %[[NOT_RECALC]] : i1
// CHECK: %[[IS_SPECIAL_CASE_AND_LHS_REAL_IS_NAN:.*]] = arith.andi %[[IS_SPECIAL_CASE3]], %[[LHS_REAL_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[LHS_REAL2]] : f32
-// CHECK: %[[LHS_REAL3:.*]] = select %[[IS_SPECIAL_CASE_AND_LHS_REAL_IS_NAN]], %[[TMP]], %[[LHS_REAL2]] : f32
+// CHECK: %[[LHS_REAL3:.*]] = arith.select %[[IS_SPECIAL_CASE_AND_LHS_REAL_IS_NAN]], %[[TMP]], %[[LHS_REAL2]] : f32
// CHECK: %[[IS_SPECIAL_CASE_AND_LHS_IMAG_IS_NAN:.*]] = arith.andi %[[IS_SPECIAL_CASE3]], %[[LHS_IMAG_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[LHS_IMAG2]] : f32
-// CHECK: %[[LHS_IMAG3:.*]] = select %[[IS_SPECIAL_CASE_AND_LHS_IMAG_IS_NAN]], %[[TMP]], %[[LHS_IMAG2]] : f32
+// CHECK: %[[LHS_IMAG3:.*]] = arith.select %[[IS_SPECIAL_CASE_AND_LHS_IMAG_IS_NAN]], %[[TMP]], %[[LHS_IMAG2]] : f32
// CHECK: %[[IS_SPECIAL_CASE_AND_RHS_REAL_IS_NAN:.*]] = arith.andi %[[IS_SPECIAL_CASE3]], %[[RHS_REAL_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[RHS_REAL2]] : f32
-// CHECK: %[[RHS_REAL3:.*]] = select %[[IS_SPECIAL_CASE_AND_RHS_REAL_IS_NAN]], %[[TMP]], %[[RHS_REAL2]] : f32
+// CHECK: %[[RHS_REAL3:.*]] = arith.select %[[IS_SPECIAL_CASE_AND_RHS_REAL_IS_NAN]], %[[TMP]], %[[RHS_REAL2]] : f32
// CHECK: %[[IS_SPECIAL_CASE_AND_RHS_IMAG_IS_NAN:.*]] = arith.andi %[[IS_SPECIAL_CASE3]], %[[RHS_IMAG_IS_NAN]] : i1
// CHECK: %[[TMP:.*]] = math.copysign %[[ZERO]], %[[RHS_IMAG2]] : f32
-// CHECK: %[[RHS_IMAG3:.*]] = select %[[IS_SPECIAL_CASE_AND_RHS_IMAG_IS_NAN]], %[[TMP]], %[[RHS_IMAG2]] : f32
+// CHECK: %[[RHS_IMAG3:.*]] = arith.select %[[IS_SPECIAL_CASE_AND_RHS_IMAG_IS_NAN]], %[[TMP]], %[[RHS_IMAG2]] : f32
// CHECK: %[[RECALC2:.*]] = arith.ori %[[RECALC]], %[[IS_SPECIAL_CASE3]] : i1
// CHECK: %[[RECALC3:.*]] = arith.andi %[[IS_NAN]], %[[RECALC2]] : i1
@@ -318,14 +318,14 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_IMAG3]], %[[RHS_IMAG3]] : f32
// CHECK: %[[NEW_REAL:.*]] = arith.subf %[[LHS_REAL_TIMES_RHS_REAL]], %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32
// CHECK: %[[NEW_REAL_TIMES_INF:.*]] = arith.mulf %[[INF]], %[[NEW_REAL]] : f32
-// CHECK: %[[FINAL_REAL:.*]] = select %[[RECALC3]], %[[NEW_REAL_TIMES_INF]], %[[REAL]] : f32
+// CHECK: %[[FINAL_REAL:.*]] = arith.select %[[RECALC3]], %[[NEW_REAL_TIMES_INF]], %[[REAL]] : f32
// Recalculate imag part.
// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_IMAG3]], %[[RHS_REAL3]] : f32
// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_REAL3]], %[[RHS_IMAG3]] : f32
// CHECK: %[[NEW_IMAG:.*]] = arith.addf %[[LHS_IMAG_TIMES_RHS_REAL]], %[[LHS_REAL_TIMES_RHS_IMAG]] : f32
// CHECK: %[[NEW_IMAG_TIMES_INF:.*]] = arith.mulf %[[INF]], %[[NEW_IMAG]] : f32
-// CHECK: %[[FINAL_IMAG:.*]] = select %[[RECALC3]], %[[NEW_IMAG_TIMES_INF]], %[[IMAG]] : f32
+// CHECK: %[[FINAL_IMAG:.*]] = arith.select %[[RECALC3]], %[[NEW_IMAG_TIMES_INF]], %[[IMAG]] : f32
// CHECK: %[[RESULT:.*]] = complex.create %[[FINAL_REAL]], %[[FINAL_IMAG]] : complex<f32>
// CHECK: return %[[RESULT]] : complex<f32>
@@ -379,7 +379,7 @@ func @complex_sign(%arg: complex<f32>) -> complex<f32> {
// CHECK: %[[REAL_SIGN:.*]] = arith.divf %[[REAL]], %[[NORM]] : f32
// CHECK: %[[IMAG_SIGN:.*]] = arith.divf %[[IMAG]], %[[NORM]] : f32
// CHECK: %[[SIGN:.*]] = complex.create %[[REAL_SIGN]], %[[IMAG_SIGN]] : complex<f32>
-// CHECK: %[[RESULT:.*]] = select %[[IS_ZERO]], %[[ARG]], %[[SIGN]] : complex<f32>
+// CHECK: %[[RESULT:.*]] = arith.select %[[IS_ZERO]], %[[ARG]], %[[SIGN]] : complex<f32>
// CHECK: return %[[RESULT]] : complex<f32>
// CHECK-LABEL: func @complex_sub
diff --git a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
index 9a4e134cef2ef..cb07089a3a681 100644
--- a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
@@ -92,7 +92,7 @@ func @reduction2(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: combiner
// CHECK: ^{{.*}}(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32)
// CHECK: %[[CMP:.*]] = arith.cmpf oge, %[[ARG0]], %[[ARG1]]
-// CHECK: %[[RES:.*]] = select %[[CMP]], %[[ARG0]], %[[ARG1]]
+// CHECK: %[[RES:.*]] = arith.select %[[CMP]], %[[ARG0]], %[[ARG1]]
// CHECK: omp.yield(%[[RES]] : f32)
// CHECK-NOT: atomic
@@ -108,7 +108,7 @@ func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
scf.reduce(%one) : f32 {
^bb0(%lhs : f32, %rhs: f32):
%cmp = arith.cmpf oge, %lhs, %rhs : f32
- %res = select %cmp, %lhs, %rhs : f32
+ %res = arith.select %cmp, %lhs, %rhs : f32
scf.reduce.return %res : f32
}
}
@@ -126,7 +126,7 @@ func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: combiner
// CHECK: ^{{.*}}(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32)
// CHECK: %[[CMP:.*]] = arith.cmpf oge, %[[ARG0]], %[[ARG1]]
-// CHECK: %[[RES:.*]] = select %[[CMP]], %[[ARG0]], %[[ARG1]]
+// CHECK: %[[RES:.*]] = arith.select %[[CMP]], %[[ARG0]], %[[ARG1]]
// CHECK: omp.yield(%[[RES]] : f32)
// CHECK-NOT: atomic
@@ -140,7 +140,7 @@ func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: combiner
// CHECK: ^{{.*}}(%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64)
// CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[ARG0]], %[[ARG1]]
-// CHECK: %[[RES:.*]] = select %[[CMP]], %[[ARG1]], %[[ARG0]]
+// CHECK: %[[RES:.*]] = arith.select %[[CMP]], %[[ARG1]], %[[ARG0]]
// CHECK: omp.yield(%[[RES]] : i64)
// CHECK: atomic
@@ -172,7 +172,7 @@ func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
scf.reduce(%one) : f32 {
^bb0(%lhs : f32, %rhs: f32):
%cmp = arith.cmpf oge, %lhs, %rhs : f32
- %res = select %cmp, %lhs, %rhs : f32
+ %res = arith.select %cmp, %lhs, %rhs : f32
scf.reduce.return %res : f32
}
// CHECK: arith.fptosi
@@ -181,7 +181,7 @@ func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
scf.reduce(%1) : i64 {
^bb1(%lhs: i64, %rhs: i64):
%cmp = arith.cmpi slt, %lhs, %rhs : i64
- %res = select %cmp, %rhs, %lhs : i64
+ %res = arith.select %cmp, %rhs, %lhs : i64
scf.reduce.return %res : i64
}
// CHECK: omp.yield
diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
index ea0ef33862ce5..97d4c9f69bab6 100644
--- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
@@ -366,9 +366,9 @@ func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : ten
// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex>
// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex>
// CHECK: %[[CMP0:.*]] = arith.cmpi ugt, %[[RANK1]], %[[RANK0]] : index
-// CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
+// CHECK: %[[LARGER_DIM:.*]] = arith.select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
// CHECK: %[[CMP1:.*]] = arith.cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index
-// CHECK: %[[MAX_RANK:.*]] = select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
+// CHECK: %[[MAX_RANK:.*]] = arith.select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
// CHECK: %[[DIM_DIFF0:.*]] = arith.subi %[[MAX_RANK]], %[[RANK0]] : index
// CHECK: %[[DIM_DIFF1:.*]] = arith.subi %[[MAX_RANK]], %[[RANK1]] : index
// CHECK: %[[DIM_DIFF2:.*]] = arith.subi %[[MAX_RANK]], %[[RANK2]] : index
@@ -382,7 +382,7 @@ func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : ten
// CHECK: %[[IDX0:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF0]] : index
// CHECK: %[[EXTRACTED_0:.*]] = tensor.extract %[[ARG0]]{{\[}}%[[IDX0]]] : tensor<2xindex>
// CHECK: %[[DIM0_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_0:.*]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM0:.*]] = select %[[DIM0_IS_1]], %[[C1_0]], %[[EXTRACTED_0]] : index
+// CHECK: %[[MAX_DIM0:.*]] = arith.select %[[DIM0_IS_1]], %[[C1_0]], %[[EXTRACTED_0]] : index
// CHECK: }
// CHECK: %[[VAL_28:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[DIM1:.*]] = scf.if %[[VAL_28]] -> (index) {
@@ -391,7 +391,7 @@ func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : ten
// CHECK: %[[IDX1:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[EXTRACTED_1:.*]] = tensor.extract %[[ARG1]]{{\[}}%[[IDX1]]] : tensor<3xindex>
// CHECK: %[[DIM1_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_1:.*]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM1:.*]] = select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
+// CHECK: %[[MAX_DIM1:.*]] = arith.select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
// CHECK: }
// CHECK: %[[VAL_36:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[DIM2:.*]] = scf.if %[[VAL_36]] -> (index) {
@@ -400,7 +400,7 @@ func @try_is_broadcastable (%a : tensor<2xindex>, %b : tensor<3xindex>, %c : ten
// CHECK: %[[IDX2:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[EXTRACTED_2:.*]] = tensor.extract %[[ARG2]]{{\[}}%[[IDX2]]] : tensor<2xindex>
// CHECK: %[[DIM2_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_2]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM2:.*]] = select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
+// CHECK: %[[MAX_DIM2:.*]] = arith.select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
// CHECK: }
// CHECK: %[[OUT_BOUND_0:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF0]] : index
// CHECK: %[[REDUCTION_0:.*]] = scf.if %[[OUT_BOUND_0]] -> (i1) {
@@ -456,9 +456,9 @@ func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>
// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex>
// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex>
// CHECK: %[[CMP0:.*]] = arith.cmpi ugt, %[[RANK1]], %[[RANK0]] : index
-// CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
+// CHECK: %[[LARGER_DIM:.*]] = arith.select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
// CHECK: %[[CMP1:.*]] = arith.cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index
-// CHECK: %[[MAX_RANK:.*]] = select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
+// CHECK: %[[MAX_RANK:.*]] = arith.select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
// CHECK: %[[DIM_DIFF0:.*]] = arith.subi %[[MAX_RANK]], %[[RANK0]] : index
// CHECK: %[[DIM_DIFF1:.*]] = arith.subi %[[MAX_RANK]], %[[RANK1]] : index
// CHECK: %[[DIM_DIFF2:.*]] = arith.subi %[[MAX_RANK]], %[[RANK2]] : index
@@ -472,7 +472,7 @@ func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>
// CHECK: %[[IDX0:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF0]] : index
// CHECK: %[[EXTRACTED_0:.*]] = tensor.extract %[[ARG0]]{{\[}}%[[IDX0]]] : tensor<2xindex>
// CHECK: %[[DIM0_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_0:.*]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM0:.*]] = select %[[DIM0_IS_1]], %[[C1_0]], %[[EXTRACTED_0]] : index
+// CHECK: %[[MAX_DIM0:.*]] = arith.select %[[DIM0_IS_1]], %[[C1_0]], %[[EXTRACTED_0]] : index
// CHECK: }
// CHECK: %[[VAL_28:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[DIM1:.*]] = scf.if %[[VAL_28]] -> (index) {
@@ -481,7 +481,7 @@ func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>
// CHECK: %[[IDX1:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[EXTRACTED_1:.*]] = tensor.extract %[[ARG1]]{{\[}}%[[IDX1]]] : tensor<3xindex>
// CHECK: %[[DIM1_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_1:.*]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM1:.*]] = select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
+// CHECK: %[[MAX_DIM1:.*]] = arith.select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
// CHECK: }
// CHECK: %[[VAL_36:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[DIM2:.*]] = scf.if %[[VAL_36]] -> (index) {
@@ -490,7 +490,7 @@ func @broadcast(%a : tensor<2xindex>, %b : tensor<3xindex>, %c : tensor<2xindex>
// CHECK: %[[IDX2:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[EXTRACTED_2:.*]] = tensor.extract %[[ARG2]]{{\[}}%[[IDX2]]] : tensor<2xindex>
// CHECK: %[[DIM2_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_2]], %[[C1_0]] : index
-// CHECK: %[[MAX_DIM2:.*]] = select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
+// CHECK: %[[MAX_DIM2:.*]] = arith.select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
// CHECK: }
// CHECK: %[[OUT_BOUND_0:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF0]] : index
// CHECK: %[[REDUCTION_0:.*]] = scf.if %[[OUT_BOUND_0]] -> (i1) {
@@ -548,9 +548,9 @@ func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
// CHECK: %[[RANK1:.*]] = tensor.dim %[[ARG1]], %[[C0]] : tensor<3xindex>
// CHECK: %[[RANK2:.*]] = tensor.dim %[[ARG2]], %[[C0]] : tensor<2xindex>
// CHECK: %[[CMP0:.*]] = arith.cmpi ugt, %[[RANK1]], %[[RANK0]] : index
-// CHECK: %[[LARGER_DIM:.*]] = select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
+// CHECK: %[[LARGER_DIM:.*]] = arith.select %[[CMP0]], %[[RANK1]], %[[RANK0]] : index
// CHECK: %[[CMP1:.*]] = arith.cmpi ugt, %[[RANK2]], %[[LARGER_DIM]] : index
-// CHECK: %[[MAX_RANK:.*]] = select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
+// CHECK: %[[MAX_RANK:.*]] = arith.select %[[CMP1]], %[[RANK2]], %[[LARGER_DIM]] : index
// CHECK: %[[DIM_DIFF0:.*]] = arith.subi %[[MAX_RANK]], %[[RANK0]] : index
// CHECK: %[[DIM_DIFF1:.*]] = arith.subi %[[MAX_RANK]], %[[RANK1]] : index
// CHECK: %[[DIM_DIFF2:.*]] = arith.subi %[[MAX_RANK]], %[[RANK2]] : index
@@ -564,7 +564,7 @@ func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
// CHECK: %[[IDX0:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF0]] : index
// CHECK: %[[EXTRACTED_0:.*]] = tensor.extract %[[ARG0]]{{\[}}%[[IDX0]]] : tensor<2xindex>
// CHECK: %[[DIM0_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_0:.*]], %[[C1]] : index
-// CHECK: %[[MAX_DIM0:.*]] = select %[[DIM0_IS_1]], %[[C1]], %[[EXTRACTED_0]] : index
+// CHECK: %[[MAX_DIM0:.*]] = arith.select %[[DIM0_IS_1]], %[[C1]], %[[EXTRACTED_0]] : index
// CHECK: }
// CHECK: %[[VAL_28:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[DIM1:.*]] = scf.if %[[VAL_28]] -> (index) {
@@ -573,7 +573,7 @@ func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
// CHECK: %[[IDX1:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF1]] : index
// CHECK: %[[EXTRACTED_1:.*]] = tensor.extract %[[ARG1]]{{\[}}%[[IDX1]]] : tensor<3xindex>
// CHECK: %[[DIM1_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_1:.*]], %[[C1]] : index
-// CHECK: %[[MAX_DIM1:.*]] = select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
+// CHECK: %[[MAX_DIM1:.*]] = arith.select %[[DIM1_IS_1]], %[[DIM0]], %[[EXTRACTED_1]] : index
// CHECK: }
// CHECK: %[[VAL_36:.*]] = arith.cmpi ult, %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[DIM2:.*]] = scf.if %[[VAL_36]] -> (index) {
@@ -582,7 +582,7 @@ func @broadcast_3_shapes_different_extents(%a : tensor<2xindex>,
// CHECK: %[[IDX2:.*]] = arith.subi %[[IDX]], %[[DIM_DIFF2]] : index
// CHECK: %[[EXTRACTED_2:.*]] = tensor.extract %[[ARG2]]{{\[}}%[[IDX2]]] : tensor<2xindex>
// CHECK: %[[DIM2_IS_1:.*]] = arith.cmpi eq, %[[EXTRACTED_2:.*]], %[[C1]] : index
-// CHECK: %[[MAX_DIM2:.*]] = select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
+// CHECK: %[[MAX_DIM2:.*]] = arith.select %[[DIM2_IS_1]], %[[DIM1]], %[[EXTRACTED_2]] : index
// CHECK: }
// CHECK: tensor.yield %[[DIM2]] : index
// CHECK: } : tensor<?xindex>
@@ -614,7 +614,7 @@ func @split_at(%shape: tensor<?xindex>, %index: index) -> (tensor<?xindex>, tens
// CHECK-NEXT: %[[RANK:.*]] = tensor.dim %[[SHAPE]], %[[C0]] : tensor<?xindex>
// CHECK-NEXT: %[[POSINDEX:.*]] = arith.addi %[[INDEX]], %[[RANK]] : index
// CHECK-NEXT: %[[ISNEG:.*]] = arith.cmpi slt, %[[INDEX]], %[[C0]] : index
- // CHECK-NEXT: %[[SELECT:.*]] = select %[[ISNEG]], %[[POSINDEX]], %[[INDEX]] : index
+ // CHECK-NEXT: %[[SELECT:.*]] = arith.select %[[ISNEG]], %[[POSINDEX]], %[[INDEX]] : index
// CHECK-NEXT: %[[C1:.*]] = arith.constant 1 : index
// CHECK-NEXT: %[[HEAD:.*]] = tensor.extract_slice %[[SHAPE]][%[[C0]]] [%[[SELECT]]] [%[[C1]]] : tensor<?xindex> to tensor<?xindex>
// CHECK-NEXT: %[[TAIL_SIZE:.*]] = arith.subi %[[RANK]], %[[SELECT]] : index
diff --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
index 71bd62a009e23..e292d8dae100d 100644
--- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
@@ -427,13 +427,6 @@ func @multireturn_caller() {
return
}
-// CHECK-LABEL: @select
-func @select(%arg0 : i1, %arg1 : i32, %arg2 : i32) -> i32 {
-// CHECK: = llvm.select %arg0, %arg1, %arg2 : i1, i32
- %0 = select %arg0, %arg1, %arg2 : i32
- return %0 : i32
-}
-
// CHECK-LABEL: @dfs_block_order
func @dfs_block_order(%arg0: i32) -> (i32) {
// CHECK-NEXT: %[[CST:.*]] = llvm.mlir.constant(42 : i32) : i32
@@ -519,19 +512,6 @@ func @fmaf(%arg0: f32, %arg1: vector<4xf32>) {
// -----
-// CHECK-LABEL: func @select_2dvector(
-func @select_2dvector(%arg0 : vector<4x3xi1>, %arg1 : vector<4x3xi32>, %arg2 : vector<4x3xi32>) {
- // CHECK: %[[EXTRACT1:.*]] = llvm.extractvalue %arg0[0] : !llvm.array<4 x vector<3xi1>>
- // CHECK: %[[EXTRACT2:.*]] = llvm.extractvalue %arg1[0] : !llvm.array<4 x vector<3xi32>>
- // CHECK: %[[EXTRACT3:.*]] = llvm.extractvalue %arg2[0] : !llvm.array<4 x vector<3xi32>>
- // CHECK: %[[SELECT:.*]] = llvm.select %[[EXTRACT1]], %[[EXTRACT2]], %[[EXTRACT3]] : vector<3xi1>, vector<3xi32>
- // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[SELECT]], %0[0] : !llvm.array<4 x vector<3xi32>>
- %0 = select %arg0, %arg1, %arg2 : vector<4x3xi1>, vector<4x3xi32>
- std.return
-}
-
-// -----
-
// CHECK-LABEL: func @switchi8(
func @switchi8(%arg0 : i8) -> i32 {
switch %arg0 : i8, [
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
index 5790c3dcbf03c..8926d7c9838dd 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt -split-input-file -convert-std-to-spirv -verify-diagnostics %s | FileCheck %s
//===----------------------------------------------------------------------===//
-// std.select
+// arith.select
//===----------------------------------------------------------------------===//
module attributes {
@@ -848,24 +848,6 @@ func @sitofp(%arg0 : i64) -> f64 {
// -----
-module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64],
- [SPV_KHR_storage_buffer_storage_class]>, {}>
-} {
-
-// CHECK-LABEL: @select
-func @select(%arg0 : i32, %arg1 : i32) {
- %0 = arith.cmpi sle, %arg0, %arg1 : i32
- // CHECK: spv.Select
- %1 = select %0, %arg0, %arg1 : i32
- return
-}
-
-} // end module
-
-// -----
-
//===----------------------------------------------------------------------===//
// std.return
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index bd08b1ae2be4d..f04759c9a5a6e 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -228,28 +228,28 @@ func @avg_pool(%arg0: tensor<1x6x34x62xf32>) -> (tensor<1x5x33x62xf32>) {
// CHECK: [[PAD0:%.+]] = arith.constant 1
// CHECK: [[SUBP0:%.+]] = arith.subi [[IDX1]], [[PAD0]]
// CHECK: [[P0CMP:%.+]] = arith.cmpi slt, [[SUBP0]], [[ZERO]]
- // CHECK: [[SELP0:%.+]] = select [[P0CMP]], [[SUBP0]], [[ZERO]]
+ // CHECK: [[SELP0:%.+]] = arith.select [[P0CMP]], [[SUBP0]], [[ZERO]]
// CHECK: [[ADDP0:%.+]] = arith.addi [[KH]], [[SELP0]]
// CHECK: [[PAD1:%.+]] = arith.constant 1
// CHECK: [[SUBP1:%.+]] = arith.subi [[NY]], [[PAD1]]
// CHECK: [[P1CMP:%.+]] = arith.cmpi slt, [[SUBP1]], [[ZERO]]
- // CHECK: [[SELP1:%.+]] = select [[P1CMP]], [[SUBP1]], [[ZERO]]
+ // CHECK: [[SELP1:%.+]] = arith.select [[P1CMP]], [[SUBP1]], [[ZERO]]
// CHECK: [[ADDP1:%.+]] = arith.addi [[ADDP0]], [[SELP1]]
// CHECK: [[YCMP:%.+]] = arith.cmpi slt, [[ADDP1]], [[ONE]]
- // CHECK: [[YSEL:%.+]] = select [[YCMP]], [[ONE]], [[ADDP1]]
+ // CHECK: [[YSEL:%.+]] = arith.select [[YCMP]], [[ONE]], [[ADDP1]]
// CHECK: [[KW:%.+]] = arith.constant 4 : index
// CHECK: [[PAD2:%.+]] = arith.constant 1 : index
// CHECK: [[SUBP2:%.+]] = arith.subi [[IDX2]], [[PAD2]]
// CHECK: [[P2CMP:%.+]] = arith.cmpi slt, [[SUBP2]], [[ZERO]]
- // CHECK: [[SELP2:%.+]] = select [[P2CMP]], [[SUBP2]], [[ZERO]]
+ // CHECK: [[SELP2:%.+]] = arith.select [[P2CMP]], [[SUBP2]], [[ZERO]]
// CHECK: [[ADDP2:%.+]] = arith.addi [[KW]], [[SELP2]]
// CHECK: [[PAD3:%.+]] = arith.constant 1 : index
// CHECK: [[SUBP3:%.+]] = arith.subi [[NX]], [[PAD3]]
// CHECK: [[P3CMP:%.+]] = arith.cmpi slt, [[SUBP3]], [[ZERO]]
- // CHECK: [[SELP3:%.+]] = select [[P3CMP]], [[SUBP3]], [[ZERO]]
+ // CHECK: [[SELP3:%.+]] = arith.select [[P3CMP]], [[SUBP3]], [[ZERO]]
// CHECK: [[ADDP3:%.+]] = arith.addi [[ADDP2]], [[SELP3]]
// CHECK: [[XCMP:%.+]] = arith.cmpi slt, [[ADDP3]], [[ONE]]
- // CHECK: [[XSEL:%.+]] = select [[XCMP]], [[ONE]], [[ADDP3]]
+ // CHECK: [[XSEL:%.+]] = arith.select [[XCMP]], [[ONE]], [[ADDP3]]
// Given the valid coverage of the pooling region, normalize the summation.
// CHECK: [[C:%.+]] = arith.muli [[YSEL]], [[XSEL]]
@@ -299,9 +299,9 @@ func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
// CHECK: %[[MIN:.+]] = arith.constant -128
// CHECK: %[[MAX:.+]] = arith.constant 127
// CHECK: %[[CMP_MIN:.+]] = arith.cmpi slt, %[[OUT]], %[[MIN]]
- // CHECK: %[[CLMP_MIN:.+]] = select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
+ // CHECK: %[[CLMP_MIN:.+]] = arith.select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
// CHECK: %[[CMP_MAX:.+]] = arith.cmpi slt, %[[MAX]], %[[OUT]]
- // CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
+ // CHECK: %[[CLMP_MAX:.+]] = arith.select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
// CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
// CHECK: linalg.yield %[[TRUNC]]
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8>
@@ -328,9 +328,9 @@ func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
// CHECK: %[[MIN:.+]] = arith.constant -32768
// CHECK: %[[MAX:.+]] = arith.constant 32767
// CHECK: %[[CMP_MIN:.+]] = arith.cmpi slt, %[[OUT]], %[[MIN]]
- // CHECK: %[[CLMP_MIN:.+]] = select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
+ // CHECK: %[[CLMP_MIN:.+]] = arith.select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
// CHECK: %[[CMP_MAX:.+]] = arith.cmpi slt, %[[MAX]], %[[OUT]]
- // CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
+ // CHECK: %[[CLMP_MAX:.+]] = arith.select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
// CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
// CHECK: linalg.yield %[[TRUNC]]
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16>
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index cb7f42ba4242a..9828fe8472b6e 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -459,18 +459,18 @@ func @test_i8(%arg0: tensor<1xi8>) -> () {
// CHECK-DAG: %[[C127:.+]] = arith.constant -127
// CHECK-DAG: %[[C126:.+]] = arith.constant 126
// CHECK-DAG: %[[CMP1:.+]] = arith.cmpi slt, %arg1, %[[C127]]
- // CHECK-DAG: %[[SEL1:.+]] = select %[[CMP1]], %[[C127]]
+ // CHECK-DAG: %[[SEL1:.+]] = arith.select %[[CMP1]], %[[C127]]
// CHECK-DAG: %[[CMP2:.+]] = arith.cmpi slt, %[[C126]], %arg1
- // CHECK: %[[SEL2:.+]] = select %[[CMP2]], %[[C126]], %[[SEL1]]
+ // CHECK: %[[SEL2:.+]] = arith.select %[[CMP2]], %[[C126]], %[[SEL1]]
%0 = "tosa.clamp"(%arg0) {min_int = -127 : i64, max_int = 126 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8>
// CHECK: linalg.generic
// CHECK-DAG: %[[C128:.+]] = arith.constant -128
// CHECK-DAG: %[[C127:.+]] = arith.constant 127
// CHECK-DAG: %[[CMP1:.+]] = arith.cmpi slt, %arg1, %[[C128]]
- // CHECK-DAG: %[[SEL1:.+]] = select %[[CMP1]], %[[C128]]
+ // CHECK-DAG: %[[SEL1:.+]] = arith.select %[[CMP1]], %[[C128]]
// CHECK-DAG: %[[CMP2:.+]] = arith.cmpi slt, %[[C127]], %arg1
- // CHECK: %[[SEL2:.+]] = select %[[CMP2]], %[[C127]], %[[SEL1]]
+ // CHECK: %[[SEL2:.+]] = arith.select %[[CMP2]], %[[C127]], %[[SEL1]]
%1 = "tosa.clamp"(%arg0) {min_int = -130 : i64, max_int = 130 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi8>) -> tensor<1xi8>
return
@@ -511,9 +511,9 @@ func @test_negate_quantized(%arg0: tensor<1xi8>) -> () {
// CHECK: [[MIN:%.+]] = arith.constant -128
// CHECK: [[MAX:%.+]] = arith.constant 127
// CHECK: [[PRED1:%.+]] = arith.cmpi slt, [[SUB]], [[MIN]]
- // CHECK: [[LBOUND:%.+]] = select [[PRED1]], [[MIN]], [[SUB]]
+ // CHECK: [[LBOUND:%.+]] = arith.select [[PRED1]], [[MIN]], [[SUB]]
// CHECK: [[PRED2:%.+]] = arith.cmpi slt, [[MAX]], [[SUB]]
- // CHECK: [[UBOUND:%.+]] = select [[PRED2]], [[MAX]], [[LBOUND]]
+ // CHECK: [[UBOUND:%.+]] = arith.select [[PRED2]], [[MAX]], [[LBOUND]]
// CHECK: [[TRUNC:%.+]] = arith.trunci [[UBOUND]]
// CHECK: linalg.yield [[TRUNC]]
%0 = "tosa.negate"(%arg0) {quantization_info = { input_zp = 0 : i32, output_zp = 0 : i32}} : (tensor<1xi8>) -> tensor<1xi8>
@@ -793,7 +793,7 @@ func @reduce_float_dyn_multiple(%arg0: tensor<?x?xf32>) -> () {
// CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "reduction"]} ins(%arg0 : tensor<?x?xf32>) outs(%[[FILL]] : tensor<?xf32>)
// CHECK: ^bb0(%arg1: f32, %arg2: f32)
// CHECK: %[[CMP:.+]] = arith.cmpf ogt, %arg1, %arg2 : f32
- // CHECK: %[[RES:.+]] = select %[[CMP]], %arg1, %arg2 : f32
+ // CHECK: %[[RES:.+]] = arith.select %[[CMP]], %arg1, %arg2 : f32
// CHECK: linalg.yield %[[RES]] : f32
// CHECK: tensor.expand_shape %[[GENERIC]] {{\[}}[0, 1]] : tensor<?xf32> into tensor<?x1xf32>
%0 = "tosa.reduce_max"(%arg0) {axis = 1 : i64} : (tensor<?x?xf32>) -> tensor<?x1xf32>
@@ -973,8 +973,8 @@ func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
// CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
// CHECK-DAG: [[MINLT:%.+]] = arith.cmpi slt, [[SCALED_ZEROED]], [[CMIN]]
// CHECK-DAG: [[MAXLT:%.+]] = arith.cmpi slt, [[CMAX]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[LOWER:%.+]] = select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[BOUNDED:%.+]] = select [[MAXLT]], [[CMAX]], [[LOWER]]
+ // CHECK-DAG: [[LOWER:%.+]] = arith.select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
+ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]]
// CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]]
// CHECK-DAG: linalg.yield [[TRUNC]]
%0 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = [19689 : i32], shift = [15 : i32], scale32 = false, double_round = false, per_channel = false} : (tensor<2xi8>) -> (tensor<2xi8>)
@@ -993,9 +993,9 @@ func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
// CHECK-DAG: [[CMIN:%.+]] = arith.constant 0
// CHECK-DAG: [[CMAX:%.+]] = arith.constant 255
// CHECK-DAG: [[MINLT:%.+]] = arith.cmpi slt, [[SCALED_ZEROED]], [[CMIN]]
- // CHECK-DAG: [[LOWER:%.+]] = select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
+ // CHECK-DAG: [[LOWER:%.+]] = arith.select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
// CHECK-DAG: [[MAXLT:%.+]] = arith.cmpi slt, [[CMAX]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[BOUNDED:%.+]] = select [[MAXLT]], [[CMAX]], [[LOWER]]
+ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]]
// CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]]
// CHECK-DAG: [[CAST:%.+]] = builtin.unrealized_conversion_cast [[TRUNC]] : i8 to ui8
// CHECK: linalg.yield [[CAST]]
@@ -1046,9 +1046,9 @@ func @rescale_ui8(%arg0 : tensor<2xui8>) -> () {
// CHECK-DAG: [[CMIN:%.+]] = arith.constant -128
// CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
// CHECK-DAG: [[MINLT:%.+]] = arith.cmpi slt, [[SCALED_ZEROED]], [[CMIN]]
- // CHECK-DAG: [[LOWER:%.+]] = select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
+ // CHECK-DAG: [[LOWER:%.+]] = arith.select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
// CHECK-DAG: [[MAXLT:%.+]] = arith.cmpi slt, [[CMAX]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[BOUNDED:%.+]] = select [[MAXLT]], [[CMAX]], [[LOWER]]
+ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]]
// CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]]
// CHECK: linalg.yield [[TRUNC]]
%0 = "tosa.rescale"(%arg0) {input_zp = 17 : i32, output_zp = 22 : i32, multiplier = [19689 : i32], shift = [15 : i32], scale32 = false, double_round = false, per_channel = false} : (tensor<2xui8>) -> (tensor<2xi8>)
@@ -1078,8 +1078,8 @@ func @rescale_per_channel(%arg0 : tensor<3xi8>) -> (tensor<3xi8>) {
// CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
// CHECK-DAG: [[MINLT:%.+]] = arith.cmpi slt, [[SCALED_ZEROED]], [[CMIN]]
// CHECK-DAG: [[MAXLT:%.+]] = arith.cmpi slt, [[CMAX]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[LOWER:%.+]] = select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
- // CHECK-DAG: [[BOUNDED:%.+]] = select [[MAXLT]], [[CMAX]], [[LOWER]]
+ // CHECK-DAG: [[LOWER:%.+]] = arith.select [[MINLT]], [[CMIN]], [[SCALED_ZEROED]]
+ // CHECK-DAG: [[BOUNDED:%.+]] = arith.select [[MAXLT]], [[CMAX]], [[LOWER]]
// CHECK-DAG: [[TRUNC:%.+]] = arith.trunci [[BOUNDED]]
// CHECK-DAG: linalg.yield [[TRUNC]]
%0 = "tosa.rescale"(%arg0) {input_zp = 243 : i32, output_zp = 252 : i32, multiplier = [42 : i32, 43 : i32, 44 : i32], shift = [14 : i32, 15 : i32, 64 : i32], scale32 = false, double_round = false, per_channel = false} : (tensor<3xi8>) -> (tensor<3xi8>)
@@ -1338,8 +1338,8 @@ func @argmax(%arg0 : tensor<3x2xi32>, %arg1 : tensor<6xf32>) -> () {
// CHECK: [[IDX:%.+]] = linalg.index 0
// CHECK: [[CAST:%.+]] = arith.index_cast [[IDX]]
// CHECK: [[CMP:%.+]] = arith.cmpi sgt, %arg2, %arg4
- // CHECK: [[SELECT_VAL:%.+]] = select [[CMP]], %arg2, %arg4
- // CHECK: [[SELECT_IDX:%.+]] = select [[CMP]], [[CAST]], %arg3
+ // CHECK: [[SELECT_VAL:%.+]] = arith.select [[CMP]], %arg2, %arg4
+ // CHECK: [[SELECT_IDX:%.+]] = arith.select [[CMP]], [[CAST]], %arg3
// CHECK: linalg.yield [[SELECT_IDX]], [[SELECT_VAL]]
%0 = "tosa.argmax"(%arg0) { axis = 0 : i64} : (tensor<3x2xi32>) -> (tensor<2xi32>)
@@ -1353,8 +1353,8 @@ func @argmax(%arg0 : tensor<3x2xi32>, %arg1 : tensor<6xf32>) -> () {
// CHECK: [[IDX:%.+]] = linalg.index 1
// CHECK: [[CAST:%.+]] = arith.index_cast [[IDX]]
// CHECK: [[CMP:%.+]] = arith.cmpi sgt, %arg2, %arg4
- // CHECK: [[SELECT_VAL:%.+]] = select [[CMP]], %arg2, %arg4
- // CHECK: [[SELECT_IDX:%.+]] = select [[CMP]], [[CAST]], %arg3
+ // CHECK: [[SELECT_VAL:%.+]] = arith.select [[CMP]], %arg2, %arg4
+ // CHECK: [[SELECT_IDX:%.+]] = arith.select [[CMP]], [[CAST]], %arg3
// CHECK: linalg.yield [[SELECT_IDX]], [[SELECT_VAL]]
%1 = "tosa.argmax"(%arg0) { axis = 1 : i64} : (tensor<3x2xi32>) -> (tensor<3xi32>)
@@ -1388,8 +1388,8 @@ func @argmax_dyn_non_axis(%arg0 : tensor<3x?xi32>) -> () {
// CHECK: %[[IDX:.+]] = linalg.index 0
// CHECK: %[[CAST:.+]] = arith.index_cast %[[IDX]]
// CHECK: %[[CMP:.+]] = arith.cmpi sgt, %arg1, %arg3
- // CHECK: %[[SELECT_VAL:.+]] = select %[[CMP]], %arg1, %arg3
- // CHECK: %[[SELECT_IDX:.+]] = select %[[CMP]], %[[CAST]], %arg2
+ // CHECK: %[[SELECT_VAL:.+]] = arith.select %[[CMP]], %arg1, %arg3
+ // CHECK: %[[SELECT_IDX:.+]] = arith.select %[[CMP]], %[[CAST]], %arg2
// CHECK: linalg.yield %[[SELECT_IDX]], %[[SELECT_VAL]]
%0 = "tosa.argmax"(%arg0) { axis = 0 : i64} : (tensor<3x?xi32>) -> (tensor<?xi32>)
return
@@ -1411,8 +1411,8 @@ func @argmax_dyn_axis(%arg0 : tensor<3x?xi32>) -> () {
// CHECK: %[[IDX:.+]] = linalg.index 1
// CHECK: %[[CAST:.+]] = arith.index_cast %[[IDX]]
// CHECK: %[[CMP:.+]] = arith.cmpi sgt, %arg1, %arg3
- // CHECK: %[[SELECT_VAL:.+]] = select %[[CMP]], %arg1, %arg3
- // CHECK: %[[SELECT_IDX:.+]] = select %[[CMP]], %[[CAST]], %arg2
+ // CHECK: %[[SELECT_VAL:.+]] = arith.select %[[CMP]], %arg1, %arg3
+ // CHECK: %[[SELECT_IDX:.+]] = arith.select %[[CMP]], %[[CAST]], %arg2
// CHECK: linalg.yield %[[SELECT_IDX]], %[[SELECT_VAL]]
%0 = "tosa.argmax"(%arg0) { axis = 1 : i64} : (tensor<3x?xi32>) -> (tensor<3xi32>)
return
@@ -1587,21 +1587,21 @@ func @resize_nearest(%input: tensor<1x2x2x1xf32>) -> () {
// CHECK-DAG: %[[VAL17:.+]] = arith.cmpf oge, %[[VAL13]], %[[ROUND]]
// CHECK-DAG: %[[ZERO:.+]] = arith.constant 0
// CHECK-DAG: %[[ONE:.+]] = arith.constant 1
- // CHECK-DAG: %[[VAL18:.+]] = select %[[VAL16]], %[[ONE]], %[[ZERO]]
- // CHECK-DAG: %[[VAL19:.+]] = select %[[VAL17]], %[[ONE]], %[[ZERO]]
+ // CHECK-DAG: %[[VAL18:.+]] = arith.select %[[VAL16]], %[[ONE]], %[[ZERO]]
+ // CHECK-DAG: %[[VAL19:.+]] = arith.select %[[VAL17]], %[[ONE]], %[[ZERO]]
// CHECK-DAG: %[[VAL20:.+]] = arith.addi %[[VAL14]], %[[VAL18]]
// CHECK-DAG: %[[VAL21:.+]] = arith.addi %[[VAL15]], %[[VAL19]]
// This section applies bound checking to be within the input image.
// CHECK-DAG: %[[VAL22:.+]] = arith.cmpi slt, %[[VAL20]], %[[XYMIN]]
- // CHECK-DAG: %[[VAL23:.+]] = select %[[VAL22]], %[[XYMIN]], %[[VAL20]]
+ // CHECK-DAG: %[[VAL23:.+]] = arith.select %[[VAL22]], %[[XYMIN]], %[[VAL20]]
// CHECK-DAG: %[[VAL24:.+]] = arith.cmpi slt, %[[YMAX]], %[[VAL20]]
- // CHECK-DAG: %[[VAL25:.+]] = select %[[VAL24]], %[[YMAX]], %[[VAL23]]
+ // CHECK-DAG: %[[VAL25:.+]] = arith.select %[[VAL24]], %[[YMAX]], %[[VAL23]]
// CHECK-DAG: %[[VAL26:.+]] = arith.cmpi slt, %[[VAL21]], %[[XYMIN]]
- // CHECK-DAG: %[[VAL27:.+]] = select %[[VAL26]], %[[XYMIN]], %[[VAL21]]
+ // CHECK-DAG: %[[VAL27:.+]] = arith.select %[[VAL26]], %[[XYMIN]], %[[VAL21]]
// CHECK-DAG: %[[VAL28:.+]] = arith.cmpi slt, %[[XMAX]], %[[VAL21]]
- // CHECK-DAG: %[[VAL29:.+]] = select %[[VAL28]], %[[XMAX]], %[[VAL27]]
+ // CHECK-DAG: %[[VAL29:.+]] = arith.select %[[VAL28]], %[[XMAX]], %[[VAL27]]
// Extract the nearest value using the computed indices.
@@ -1646,24 +1646,24 @@ func @resize_bilinear(%input: tensor<1x2x2x1xf32>) -> () {
// Bound check each dimension.
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[Y0]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[Y0]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[Y0]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[YMAX]], %[[Y0]]
- // CHECK: %[[YLO:.+]] = select %[[PRED]], %[[YMAX]], %[[BOUND]]
+ // CHECK: %[[YLO:.+]] = arith.select %[[PRED]], %[[YMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[Y1]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[Y1]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[Y1]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[YMAX]], %[[Y1]]
- // CHECK: %[[YHI:.+]] = select %[[PRED]], %[[YMAX]], %[[BOUND]]
+ // CHECK: %[[YHI:.+]] = arith.select %[[PRED]], %[[YMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[X0]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[X0]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[X0]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[XMAX]], %[[X0]]
- // CHECK: %[[XLO:.+]] = select %[[PRED]], %[[XMAX]], %[[BOUND]]
+ // CHECK: %[[XLO:.+]] = arith.select %[[PRED]], %[[XMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[X1]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[X1]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[X1]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[XMAX]], %[[X1]]
- // CHECK: %[[XHI:.+]] = select %[[PRED]], %[[XMAX]], %[[BOUND]]
+ // CHECK: %[[XHI:.+]] = arith.select %[[PRED]], %[[XMAX]], %[[BOUND]]
// Extract each corner of the bilinear interpolation.
@@ -1738,21 +1738,21 @@ func @resize_nearest_int(%input: tensor<1x2x2x1xi32>) -> () {
// CHECK-DAG: %[[VAL17:.+]] = arith.cmpi sge, %[[VAL13]], %[[ROUND]]
// CHECK-DAG: %[[ZERO:.+]] = arith.constant 0
// CHECK-DAG: %[[ONE:.+]] = arith.constant 1
- // CHECK-DAG: %[[VAL18:.+]] = select %[[VAL16]], %[[ONE]], %[[ZERO]]
- // CHECK-DAG: %[[VAL19:.+]] = select %[[VAL17]], %[[ONE]], %[[ZERO]]
+ // CHECK-DAG: %[[VAL18:.+]] = arith.select %[[VAL16]], %[[ONE]], %[[ZERO]]
+ // CHECK-DAG: %[[VAL19:.+]] = arith.select %[[VAL17]], %[[ONE]], %[[ZERO]]
// CHECK-DAG: %[[VAL20:.+]] = arith.addi %[[VAL8]], %[[VAL18]]
// CHECK-DAG: %[[VAL21:.+]] = arith.addi %[[VAL9]], %[[VAL19]]
// This section applies bound checking to be within the input image.
// CHECK-DAG: %[[VAL22:.+]] = arith.cmpi slt, %[[VAL20]], %[[XYMIN]]
- // CHECK-DAG: %[[VAL23:.+]] = select %[[VAL22]], %[[XYMIN]], %[[VAL20]]
+ // CHECK-DAG: %[[VAL23:.+]] = arith.select %[[VAL22]], %[[XYMIN]], %[[VAL20]]
// CHECK-DAG: %[[VAL24:.+]] = arith.cmpi slt, %[[YMAX]], %[[VAL20]]
- // CHECK-DAG: %[[VAL25:.+]] = select %[[VAL24]], %[[YMAX]], %[[VAL23]]
+ // CHECK-DAG: %[[VAL25:.+]] = arith.select %[[VAL24]], %[[YMAX]], %[[VAL23]]
// CHECK-DAG: %[[VAL26:.+]] = arith.cmpi slt, %[[VAL21]], %[[XYMIN]]
- // CHECK-DAG: %[[VAL27:.+]] = select %[[VAL26]], %[[XYMIN]], %[[VAL21]]
+ // CHECK-DAG: %[[VAL27:.+]] = arith.select %[[VAL26]], %[[XYMIN]], %[[VAL21]]
// CHECK-DAG: %[[VAL28:.+]] = arith.cmpi slt, %[[XMAX]], %[[VAL21]]
- // CHECK-DAG: %[[VAL29:.+]] = select %[[VAL28]], %[[XMAX]], %[[VAL27]]
+ // CHECK-DAG: %[[VAL29:.+]] = arith.select %[[VAL28]], %[[XMAX]], %[[VAL27]]
// Extract the nearest value using the computed indices.
@@ -1794,24 +1794,24 @@ func @resize_bilinear_int(%input: tensor<1x2x2x1xi8>) -> () {
// Bound check each dimension.
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[Y0]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[Y0]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[Y0]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[YMAX]], %[[Y0]]
- // CHECK: %[[YLO:.+]] = select %[[PRED]], %[[YMAX]], %[[BOUND]]
+ // CHECK: %[[YLO:.+]] = arith.select %[[PRED]], %[[YMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[Y1]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[Y1]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[Y1]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[YMAX]], %[[Y1]]
- // CHECK: %[[YHI:.+]] = select %[[PRED]], %[[YMAX]], %[[BOUND]]
+ // CHECK: %[[YHI:.+]] = arith.select %[[PRED]], %[[YMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[X0]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[X0]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[X0]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[XMAX]], %[[X0]]
- // CHECK: %[[XLO:.+]] = select %[[PRED]], %[[XMAX]], %[[BOUND]]
+ // CHECK: %[[XLO:.+]] = arith.select %[[PRED]], %[[XMAX]], %[[BOUND]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[X1]], %[[XYMIN]]
- // CHECK: %[[BOUND:.+]] = select %[[PRED]], %[[XYMIN]], %[[X1]]
+ // CHECK: %[[BOUND:.+]] = arith.select %[[PRED]], %[[XYMIN]], %[[X1]]
// CHECK: %[[PRED:.+]] = arith.cmpi slt, %[[XMAX]], %[[X1]]
- // CHECK: %[[XHI:.+]] = select %[[PRED]], %[[XMAX]], %[[BOUND]]
+ // CHECK: %[[XHI:.+]] = arith.select %[[PRED]], %[[XMAX]], %[[BOUND]]
// Extract each corner of the bilinear interpolation.
diff --git a/mlir/test/Conversion/TosaToStandard/tosa-to-standard.mlir b/mlir/test/Conversion/TosaToStandard/tosa-to-standard.mlir
index b346f43c37d9c..99b476b3d0f55 100644
--- a/mlir/test/Conversion/TosaToStandard/tosa-to-standard.mlir
+++ b/mlir/test/Conversion/TosaToStandard/tosa-to-standard.mlir
@@ -37,10 +37,10 @@ func @apply_scale_test_i32(%arg0 : i32, %arg1 : i32, %arg2 : i8) -> (i32) {
// CHECK-DAG: [[POSITIVE_ROUND:%.+]] = arith.addi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[NEGATIVE_ROUND:%.+]] = arith.subi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[VALUE_NEGATIVE:%.+]] = arith.cmpi sge, %arg0, [[C0_32]] : i32
- // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : i64
+ // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = arith.select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : i64
// CHECK-DAG: [[C32_32:%.+]] = arith.constant 32 : i32
// CHECK-DAG: [[IS_32BIT_SHIFT:%.+]] = arith.cmpi sge, [[SHIFT_32]], [[C32_32]]
- // CHECK-DAG: [[ROUND:%.+]] = select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
+ // CHECK-DAG: [[ROUND:%.+]] = arith.select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
// CHECK-DAG: [[VAL_64:%.+]] = arith.extsi %arg0 : i32 to i64
// CHECK-DAG: [[MULTIPLY_64:%.+]] = arith.extsi %arg1 : i32 to i64
@@ -74,10 +74,10 @@ func @apply_scale_test_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>, %arg
// CHECK-DAG: [[POSITIVE_ROUND:%.+]] = arith.addi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[NEGATIVE_ROUND:%.+]] = arith.subi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[VALUE_NEGATIVE:%.+]] = arith.cmpi sge, %arg0, [[C0_32]] : vector<4xi32>
- // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : vector<4xi1>, vector<4xi64>
+ // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = arith.select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : vector<4xi1>, vector<4xi64>
// CHECK-DAG: [[C32_32:%.+]] = arith.constant dense<32> : vector<4xi32>
// CHECK-DAG: [[IS_32BIT_SHIFT:%.+]] = arith.cmpi sge, [[SHIFT_32]], [[C32_32]]
- // CHECK-DAG: [[ROUND:%.+]] = select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
+ // CHECK-DAG: [[ROUND:%.+]] = arith.select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
// CHECK-DAG: [[VAL_64:%.+]] = arith.extsi %arg0 : vector<4xi32> to vector<4xi64>
// CHECK-DAG: [[MULTIPLY_64:%.+]] = arith.extsi %arg1 : vector<4xi32> to vector<4xi64>
@@ -110,9 +110,9 @@ func @apply_scale_test_i48(%arg0 : i48, %arg1 : i32, %arg2 : i8) -> (i32) {
// CHECK-DAG: [[POSITIVE_ROUND:%.+]] = arith.addi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[NEGATIVE_ROUND:%.+]] = arith.subi [[SHIFTED_64]], [[SECOND_BIAS_64]]
// CHECK-DAG: [[VALUE_NEGATIVE:%.+]] = arith.cmpi sge, %arg0, [[C0_32]] : i48
- // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : i64
+ // CHECK-DAG: [[DOUBLE_ROUNDED:%.+]] = arith.select [[VALUE_NEGATIVE]], [[POSITIVE_ROUND]], [[NEGATIVE_ROUND]] : i64
// CHECK-DAG: [[IS_32BIT_SHIFT:%.+]] = arith.cmpi sge, [[SHIFT_32]], [[C32_32]]
- // CHECK-DAG: [[ROUND:%.+]] = select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
+ // CHECK-DAG: [[ROUND:%.+]] = arith.select [[IS_32BIT_SHIFT]], [[DOUBLE_ROUNDED]], [[SHIFTED_64]]
// CHECK-DAG: [[VAL_64:%.+]] = arith.extsi %arg0 : i48 to i64
// CHECK-DAG: [[MULTIPLY_64:%.+]] = arith.extsi %arg1 : i32 to i64
// CHECK-DAG: [[SHIFT_64:%.+]] = arith.extsi %arg2 : i8 to i64
diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
index 024184408ae84..09593b9705328 100644
--- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
+++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir
@@ -476,7 +476,7 @@ func @vecdim_reduction_masked(%in: memref<256x512xf32>, %out: memref<256xf32>) {
// CHECK: %[[mask:.*]] = vector.create_mask %[[elems_left]] : vector<128xi1>
// CHECK: %[[ld:.*]] = vector.transfer_read %{{.*}} : memref<256x512xf32>, vector<128xf32>
// CHECK: %[[add:.*]] = arith.addf %[[red_iter]], %[[ld]] : vector<128xf32>
-// CHECK: %[[new_acc:.*]] = select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_acc:.*]] = arith.select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
// CHECK: affine.yield %[[new_acc]] : vector<128xf32>
// CHECK: }
// CHECK: %[[final_sum:.*]] = vector.reduction "add", %[[vred:.*]] : vector<128xf32> into f32
@@ -509,7 +509,7 @@ func @vecdim_reduction_masked_unknown_ub(%in: memref<256x512xf32>, %out: memref<
// CHECK: %[[mask:.*]] = vector.create_mask %[[elems_left]] : vector<128xi1>
// CHECK: %[[ld:.*]] = vector.transfer_read %{{.*}} : memref<256x512xf32>, vector<128xf32>
// CHECK: %[[add:.*]] = arith.addf %[[red_iter]], %[[ld]] : vector<128xf32>
-// CHECK: %[[new_acc:.*]] = select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_acc:.*]] = arith.select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
// CHECK: affine.yield %[[new_acc]] : vector<128xf32>
// CHECK: }
// CHECK: %[[final_sum:.*]] = vector.reduction "add", %[[vred:.*]] : vector<128xf32> into f32
@@ -562,7 +562,7 @@ func @vecdim_reduction_masked_unknown_lb(%in: memref<256x512xf32>, %out: memref<
// CHECK: %[[mask:.*]] = vector.create_mask %[[elems_left]] : vector<128xi1>
// CHECK: %[[ld:.*]] = vector.transfer_read %{{.*}} : memref<256x512xf32>, vector<128xf32>
// CHECK: %[[add:.*]] = arith.addf %[[red_iter]], %[[ld]] : vector<128xf32>
-// CHECK: %[[new_acc:.*]] = select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_acc:.*]] = arith.select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
// CHECK: affine.yield %[[new_acc]] : vector<128xf32>
// -----
@@ -591,7 +591,7 @@ func @vecdim_reduction_complex_ub(%in: memref<256x512xf32>, %out: memref<256xf32
// CHECK: %[[mask:.*]] = vector.create_mask %[[elems_left]] : vector<128xi1>
// CHECK: %[[ld:.*]] = vector.transfer_read %{{.*}} : memref<256x512xf32>, vector<128xf32>
// CHECK: %[[add:.*]] = arith.addf %[[red_iter]], %[[ld]] : vector<128xf32>
-// CHECK: %[[new_acc:.*]] = select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_acc:.*]] = arith.select %[[mask]], %[[add]], %[[red_iter]] : vector<128xi1>, vector<128xf32>
// CHECK: affine.yield %[[new_acc]] : vector<128xf32>
// -----
@@ -624,7 +624,7 @@ func @vecdim_two_reductions_masked(%in: memref<256x512xf32>, %out: memref<512xf3
// CHECK: %[[exp:.*]] = math.exp %[[ld]] : vector<128xf32>
// CHECK: %[[add:.*]] = arith.addf %[[sum_iter]], %[[ld]] : vector<128xf32>
// CHECK: %[[eadd:.*]] = arith.addf %[[esum_iter]], %[[exp]] : vector<128xf32>
-// CHECK: %[[new_acc:.*]] = select %[[mask]], %[[add]], %[[sum_iter]] : vector<128xi1>, vector<128xf32>
-// CHECK: %[[new_eacc:.*]] = select %[[mask]], %[[eadd]], %[[esum_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_acc:.*]] = arith.select %[[mask]], %[[add]], %[[sum_iter]] : vector<128xi1>, vector<128xf32>
+// CHECK: %[[new_eacc:.*]] = arith.select %[[mask]], %[[eadd]], %[[esum_iter]] : vector<128xi1>, vector<128xf32>
// CHECK: affine.yield %[[new_acc]], %[[new_eacc]] : vector<128xf32>
// CHECK: }
diff --git a/mlir/test/Dialect/Affine/parallelize.mlir b/mlir/test/Dialect/Affine/parallelize.mlir
index e01a8048e745c..e9ecc7b122106 100644
--- a/mlir/test/Dialect/Affine/parallelize.mlir
+++ b/mlir/test/Dialect/Affine/parallelize.mlir
@@ -27,7 +27,7 @@ func @reduce_window_max() {
%2 = affine.load %0[%arg0, %arg1, %arg2, %arg3] : memref<1x8x8x64xf32>
%3 = affine.load %1[%arg0 + %arg4, %arg1 * 2 + %arg5, %arg2 * 2 + %arg6, %arg3 + %arg7] : memref<1x18x18x64xf32>
%4 = arith.cmpf ogt, %2, %3 : f32
- %5 = select %4, %2, %3 : f32
+ %5 = arith.select %4, %2, %3 : f32
affine.store %5, %0[%arg0, %arg1, %arg2, %arg3] : memref<1x8x8x64xf32>
}
}
@@ -63,7 +63,7 @@ func @reduce_window_max() {
// CHECK: %[[lhs:.*]] = affine.load %[[v0]][%[[a0]], %[[a1]], %[[a2]], %[[a3]]] : memref<1x8x8x64xf32>
// CHECK: %[[rhs:.*]] = affine.load %[[v1]][%[[a0]] + %[[a4]], %[[a1]] * 2 + %[[a5]], %[[a2]] * 2 + %[[a6]], %[[a3]] + %[[a7]]] : memref<1x18x18x64xf32>
// CHECK: %[[res:.*]] = arith.cmpf ogt, %[[lhs]], %[[rhs]] : f32
-// CHECK: %[[sel:.*]] = select %[[res]], %[[lhs]], %[[rhs]] : f32
+// CHECK: %[[sel:.*]] = arith.select %[[res]], %[[lhs]], %[[rhs]] : f32
// CHECK: affine.store %[[sel]], %[[v0]][%[[a0]], %[[a1]], %[[a2]], %[[a3]]] : memref<1x8x8x64xf32>
// CHECK: }
// CHECK: }
diff --git a/mlir/test/Dialect/Arithmetic/bufferize.mlir b/mlir/test/Dialect/Arithmetic/bufferize.mlir
index f39d8a46a0934..7dc1384bfb219 100644
--- a/mlir/test/Dialect/Arithmetic/bufferize.mlir
+++ b/mlir/test/Dialect/Arithmetic/bufferize.mlir
@@ -80,3 +80,19 @@ func @non_tensor() {
}
// CHECK: }
+
+// -----
+
+// CHECK-LABEL: func @select(
+// CHECK-SAME: %[[PRED:.*]]: i1,
+// CHECK-SAME: %[[TRUE_VAL:.*]]: tensor<f32>,
+// CHECK-SAME: %[[FALSE_VAL:.*]]: tensor<f32>) -> tensor<f32> {
+// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : memref<f32>
+// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : memref<f32>
+// CHECK: %[[RET_MEMREF:.*]] = arith.select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref<f32>
+// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref<f32>
+// CHECK: return %[[RET]] : tensor<f32>
+func @select(%arg0: i1, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
+ %0 = arith.select %arg0, %arg1, %arg2 : tensor<f32>
+ return %0 : tensor<f32>
+}
diff --git a/mlir/test/Dialect/Arithmetic/expand-ops.mlir b/mlir/test/Dialect/Arithmetic/expand-ops.mlir
index f4a557a02b205..4bb4ffa9f5b8a 100644
--- a/mlir/test/Dialect/Arithmetic/expand-ops.mlir
+++ b/mlir/test/Dialect/Arithmetic/expand-ops.mlir
@@ -11,7 +11,7 @@ func @ceildivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[ZERO:%.+]] = arith.constant 0 : i32
// CHECK: [[MINONE:%.+]] = arith.constant -1 : i32
// CHECK: [[CMP1:%.+]] = arith.cmpi sgt, [[ARG1]], [[ZERO]] : i32
-// CHECK: [[X:%.+]] = select [[CMP1]], [[MINONE]], [[ONE]] : i32
+// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[MINONE]], [[ONE]] : i32
// CHECK: [[TRUE1:%.+]] = arith.addi [[X]], [[ARG0]] : i32
// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : i32
// CHECK: [[TRUE3:%.+]] = arith.addi [[ONE]], [[TRUE2]] : i32
@@ -25,7 +25,7 @@ func @ceildivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MNEG]] : i1
// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MPOS]] : i1
// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = select [[CMP2]], [[TRUE3]], [[FALSE3]] : i32
+// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE3]] : i32
}
// -----
@@ -41,7 +41,7 @@ func @ceildivi_index(%arg0: index, %arg1: index) -> (index) {
// CHECK: [[ZERO:%.+]] = arith.constant 0 : index
// CHECK: [[MINONE:%.+]] = arith.constant -1 : index
// CHECK: [[CMP1:%.+]] = arith.cmpi sgt, [[ARG1]], [[ZERO]] : index
-// CHECK: [[X:%.+]] = select [[CMP1]], [[MINONE]], [[ONE]] : index
+// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[MINONE]], [[ONE]] : index
// CHECK: [[TRUE1:%.+]] = arith.addi [[X]], [[ARG0]] : index
// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : index
// CHECK: [[TRUE3:%.+]] = arith.addi [[ONE]], [[TRUE2]] : index
@@ -55,7 +55,7 @@ func @ceildivi_index(%arg0: index, %arg1: index) -> (index) {
// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MNEG]] : i1
// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MPOS]] : i1
// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = select [[CMP2]], [[TRUE3]], [[FALSE3]] : index
+// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE3]] : index
}
// -----
@@ -70,7 +70,7 @@ func @floordivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[ZERO:%.+]] = arith.constant 0 : i32
// CHECK: [[MIN1:%.+]] = arith.constant -1 : i32
// CHECK: [[CMP1:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : i32
-// CHECK: [[X:%.+]] = select [[CMP1]], [[ONE]], [[MIN1]] : i32
+// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[ONE]], [[MIN1]] : i32
// CHECK: [[TRUE1:%.+]] = arith.subi [[X]], [[ARG0]] : i32
// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : i32
// CHECK: [[TRUE3:%.+]] = arith.subi [[MIN1]], [[TRUE2]] : i32
@@ -82,7 +82,7 @@ func @floordivi(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MPOS]] : i1
// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MNEG]] : i1
// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = select [[CMP2]], [[TRUE3]], [[FALSE]] : i32
+// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE]] : i32
}
// -----
@@ -97,7 +97,7 @@ func @floordivi_index(%arg0: index, %arg1: index) -> (index) {
// CHECK: [[ZERO:%.+]] = arith.constant 0 : index
// CHECK: [[MIN1:%.+]] = arith.constant -1 : index
// CHECK: [[CMP1:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : index
-// CHECK: [[X:%.+]] = select [[CMP1]], [[ONE]], [[MIN1]] : index
+// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[ONE]], [[MIN1]] : index
// CHECK: [[TRUE1:%.+]] = arith.subi [[X]], [[ARG0]] : index
// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : index
// CHECK: [[TRUE3:%.+]] = arith.subi [[MIN1]], [[TRUE2]] : index
@@ -109,7 +109,7 @@ func @floordivi_index(%arg0: index, %arg1: index) -> (index) {
// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MPOS]] : i1
// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MNEG]] : i1
// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = select [[CMP2]], [[TRUE3]], [[FALSE]] : index
+// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE]] : index
}
// -----
@@ -126,7 +126,7 @@ func @ceildivui(%arg0: i32, %arg1: i32) -> (i32) {
// CHECK: [[SUB:%.+]] = arith.subi %arg0, [[ONE]] : i32
// CHECK: [[DIV:%.+]] = arith.divui [[SUB]], %arg1 : i32
// CHECK: [[REM:%.+]] = arith.addi [[DIV]], [[ONE]] : i32
-// CHECK: [[RES:%.+]] = select [[ISZERO]], [[ZERO]], [[REM]] : i32
+// CHECK: [[RES:%.+]] = arith.select [[ISZERO]], [[ZERO]], [[REM]] : i32
}
// -----
@@ -143,7 +143,7 @@ func @ceildivui_index(%arg0: index, %arg1: index) -> (index) {
// CHECK: [[SUB:%.+]] = arith.subi %arg0, [[ONE]] : index
// CHECK: [[DIV:%.+]] = arith.divui [[SUB]], %arg1 : index
// CHECK: [[REM:%.+]] = arith.addi [[DIV]], [[ONE]] : index
-// CHECK: [[RES:%.+]] = select [[ISZERO]], [[ZERO]], [[REM]] : index
+// CHECK: [[RES:%.+]] = arith.select [[ISZERO]], [[ZERO]], [[REM]] : index
}
// -----
@@ -155,9 +155,9 @@ func @maxf(%a: f32, %b: f32) -> f32 {
}
// CHECK-SAME: %[[LHS:.*]]: f32, %[[RHS:.*]]: f32)
// CHECK-NEXT: %[[CMP:.*]] = arith.cmpf ugt, %[[LHS]], %[[RHS]] : f32
-// CHECK-NEXT: %[[SELECT:.*]] = select %[[CMP]], %[[LHS]], %[[RHS]] : f32
+// CHECK-NEXT: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LHS]], %[[RHS]] : f32
// CHECK-NEXT: %[[IS_NAN:.*]] = arith.cmpf uno, %[[RHS]], %[[RHS]] : f32
-// CHECK-NEXT: %[[RESULT:.*]] = select %[[IS_NAN]], %[[RHS]], %[[SELECT]] : f32
+// CHECK-NEXT: %[[RESULT:.*]] = arith.select %[[IS_NAN]], %[[RHS]], %[[SELECT]] : f32
// CHECK-NEXT: return %[[RESULT]] : f32
// -----
@@ -169,9 +169,9 @@ func @maxf_vector(%a: vector<4xf16>, %b: vector<4xf16>) -> vector<4xf16> {
}
// CHECK-SAME: %[[LHS:.*]]: vector<4xf16>, %[[RHS:.*]]: vector<4xf16>)
// CHECK-NEXT: %[[CMP:.*]] = arith.cmpf ugt, %[[LHS]], %[[RHS]] : vector<4xf16>
-// CHECK-NEXT: %[[SELECT:.*]] = select %[[CMP]], %[[LHS]], %[[RHS]]
+// CHECK-NEXT: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LHS]], %[[RHS]]
// CHECK-NEXT: %[[IS_NAN:.*]] = arith.cmpf uno, %[[RHS]], %[[RHS]] : vector<4xf16>
-// CHECK-NEXT: %[[RESULT:.*]] = select %[[IS_NAN]], %[[RHS]], %[[SELECT]]
+// CHECK-NEXT: %[[RESULT:.*]] = arith.select %[[IS_NAN]], %[[RHS]], %[[SELECT]]
// CHECK-NEXT: return %[[RESULT]] : vector<4xf16>
// -----
@@ -183,9 +183,9 @@ func @minf(%a: f32, %b: f32) -> f32 {
}
// CHECK-SAME: %[[LHS:.*]]: f32, %[[RHS:.*]]: f32)
// CHECK-NEXT: %[[CMP:.*]] = arith.cmpf ult, %[[LHS]], %[[RHS]] : f32
-// CHECK-NEXT: %[[SELECT:.*]] = select %[[CMP]], %[[LHS]], %[[RHS]] : f32
+// CHECK-NEXT: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LHS]], %[[RHS]] : f32
// CHECK-NEXT: %[[IS_NAN:.*]] = arith.cmpf uno, %[[RHS]], %[[RHS]] : f32
-// CHECK-NEXT: %[[RESULT:.*]] = select %[[IS_NAN]], %[[RHS]], %[[SELECT]] : f32
+// CHECK-NEXT: %[[RESULT:.*]] = arith.select %[[IS_NAN]], %[[RHS]], %[[SELECT]] : f32
// CHECK-NEXT: return %[[RESULT]] : f32
diff --git a/mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir b/mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir
index c4edca63fbd69..217e63bd67adf 100644
--- a/mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir
+++ b/mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir
@@ -101,7 +101,7 @@ func @sink_constant_step(%arg0: memref<?x10xf32>, %lb: index, %ub: index) {
// CHECK-SAME: %[[MEMREF:arg[0-9]+]]: memref<?x10xf32>
// CHECK-SAME: ) {
// CHECK: scf.for %[[I:arg[0-9]+]]
-// CHECK: select
+// CHECK: arith.select
// CHECK: scf.for %[[J:arg[0-9]+]]
// CHECK: memref.store
@@ -122,5 +122,5 @@ func @sink_constant_step(%arg0: memref<?x10xf32>, %lb: index, %ub: index) {
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C10:.*]] = arith.constant 10 : index
// CHECK: scf.for %[[I:arg[0-9]+]]
-// CHECK-NOT: select
+// CHECK-NOT: arith.select
// CHECK: scf.for %[[J:arg[0-9]+]] = %c0 to %c10 step %c1
diff --git a/mlir/test/Dialect/Async/async-parallel-for-num-worker-threads.mlir b/mlir/test/Dialect/Async/async-parallel-for-num-worker-threads.mlir
index 04cfeabfb5828..8272316e0998e 100644
--- a/mlir/test/Dialect/Async/async-parallel-for-num-worker-threads.mlir
+++ b/mlir/test/Dialect/Async/async-parallel-for-num-worker-threads.mlir
@@ -18,15 +18,15 @@ func @num_worker_threads(%arg0: memref<?xf32>) {
// CHECK: %[[scalingCst64:.*]] = arith.constant 6.000000e-01 : f32
// CHECK: %[[workersIndex:.*]] = async.runtime.num_worker_threads : index
// CHECK: %[[inBracket4:.*]] = arith.cmpi sgt, %[[workersIndex]], %[[bracketLowerBound4]] : index
- // CHECK: %[[scalingFactor4:.*]] = select %[[inBracket4]], %[[scalingCst4]], %[[scalingCstInit]] : f32
+ // CHECK: %[[scalingFactor4:.*]] = arith.select %[[inBracket4]], %[[scalingCst4]], %[[scalingCstInit]] : f32
// CHECK: %[[inBracket8:.*]] = arith.cmpi sgt, %[[workersIndex]], %[[bracketLowerBound8]] : index
- // CHECK: %[[scalingFactor8:.*]] = select %[[inBracket8]], %[[scalingCst8]], %[[scalingFactor4]] : f32
+ // CHECK: %[[scalingFactor8:.*]] = arith.select %[[inBracket8]], %[[scalingCst8]], %[[scalingFactor4]] : f32
// CHECK: %[[inBracket16:.*]] = arith.cmpi sgt, %[[workersIndex]], %[[bracketLowerBound16]] : index
- // CHECK: %[[scalingFactor16:.*]] = select %[[inBracket16]], %[[scalingCst16]], %[[scalingFactor8]] : f32
+ // CHECK: %[[scalingFactor16:.*]] = arith.select %[[inBracket16]], %[[scalingCst16]], %[[scalingFactor8]] : f32
// CHECK: %[[inBracket32:.*]] = arith.cmpi sgt, %[[workersIndex]], %[[bracketLowerBound32]] : index
- // CHECK: %[[scalingFactor32:.*]] = select %[[inBracket32]], %[[scalingCst32]], %[[scalingFactor16]] : f32
+ // CHECK: %[[scalingFactor32:.*]] = arith.select %[[inBracket32]], %[[scalingCst32]], %[[scalingFactor16]] : f32
// CHECK: %[[inBracket64:.*]] = arith.cmpi sgt, %[[workersIndex]], %[[bracketLowerBound64]] : index
- // CHECK: %[[scalingFactor64:.*]] = select %[[inBracket64]], %[[scalingCst64]], %[[scalingFactor32]] : f32
+ // CHECK: %[[scalingFactor64:.*]] = arith.select %[[inBracket64]], %[[scalingCst64]], %[[scalingFactor32]] : f32
// CHECK: %[[workersInt:.*]] = arith.index_cast %[[workersIndex]] : index to i32
// CHECK: %[[workersFloat:.*]] = arith.sitofp %[[workersInt]] : i32 to f32
// CHECK: %[[scaledFloat:.*]] = arith.mulf %[[scalingFactor64]], %[[workersFloat]] : f32
diff --git a/mlir/test/Dialect/GPU/all-reduce-max.mlir b/mlir/test/Dialect/GPU/all-reduce-max.mlir
index 952a936b6739b..e8ea4fb6f5635 100644
--- a/mlir/test/Dialect/GPU/all-reduce-max.mlir
+++ b/mlir/test/Dialect/GPU/all-reduce-max.mlir
@@ -45,7 +45,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_35]], ^bb2, ^bb3
// CHECK: ^bb2:
// CHECK: [[VAL_36:%.*]] = arith.cmpf ugt, [[VAL_0]], [[VAL_34]] : f32
- // CHECK: [[VAL_37:%.*]] = select [[VAL_36]], [[VAL_0]], [[VAL_34]] : f32
+ // CHECK: [[VAL_37:%.*]] = arith.select [[VAL_36]], [[VAL_0]], [[VAL_34]] : f32
// CHECK: br ^bb4([[VAL_37]] : f32)
// CHECK: ^bb3:
// CHECK: br ^bb4([[VAL_0]] : f32)
@@ -54,7 +54,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_40]], ^bb5, ^bb6
// CHECK: ^bb5:
// CHECK: [[VAL_41:%.*]] = arith.cmpf ugt, [[VAL_38]], [[VAL_39]] : f32
- // CHECK: [[VAL_42:%.*]] = select [[VAL_41]], [[VAL_38]], [[VAL_39]] : f32
+ // CHECK: [[VAL_42:%.*]] = arith.select [[VAL_41]], [[VAL_38]], [[VAL_39]] : f32
// CHECK: br ^bb7([[VAL_42]] : f32)
// CHECK: ^bb6:
// CHECK: br ^bb7([[VAL_38]] : f32)
@@ -63,7 +63,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_45]], ^bb8, ^bb9
// CHECK: ^bb8:
// CHECK: [[VAL_46:%.*]] = arith.cmpf ugt, [[VAL_43]], [[VAL_44]] : f32
- // CHECK: [[VAL_47:%.*]] = select [[VAL_46]], [[VAL_43]], [[VAL_44]] : f32
+ // CHECK: [[VAL_47:%.*]] = arith.select [[VAL_46]], [[VAL_43]], [[VAL_44]] : f32
// CHECK: br ^bb10([[VAL_47]] : f32)
// CHECK: ^bb9:
// CHECK: br ^bb10([[VAL_43]] : f32)
@@ -72,7 +72,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_50]], ^bb11, ^bb12
// CHECK: ^bb11:
// CHECK: [[VAL_51:%.*]] = arith.cmpf ugt, [[VAL_48]], [[VAL_49]] : f32
- // CHECK: [[VAL_52:%.*]] = select [[VAL_51]], [[VAL_48]], [[VAL_49]] : f32
+ // CHECK: [[VAL_52:%.*]] = arith.select [[VAL_51]], [[VAL_48]], [[VAL_49]] : f32
// CHECK: br ^bb13([[VAL_52]] : f32)
// CHECK: ^bb12:
// CHECK: br ^bb13([[VAL_48]] : f32)
@@ -81,7 +81,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_55]], ^bb14, ^bb15
// CHECK: ^bb14:
// CHECK: [[VAL_56:%.*]] = arith.cmpf ugt, [[VAL_53]], [[VAL_54]] : f32
- // CHECK: [[VAL_57:%.*]] = select [[VAL_56]], [[VAL_53]], [[VAL_54]] : f32
+ // CHECK: [[VAL_57:%.*]] = arith.select [[VAL_56]], [[VAL_53]], [[VAL_54]] : f32
// CHECK: br ^bb16([[VAL_57]] : f32)
// CHECK: ^bb15:
// CHECK: br ^bb16([[VAL_53]] : f32)
@@ -90,19 +90,19 @@ gpu.module @kernels {
// CHECK: ^bb17:
// CHECK: [[VAL_59:%.*]], [[VAL_60:%.*]] = gpu.shuffle xor [[VAL_0]], [[VAL_6]], [[VAL_5]] : f32
// CHECK: [[VAL_61:%.*]] = arith.cmpf ugt, [[VAL_0]], [[VAL_59]] : f32
- // CHECK: [[VAL_62:%.*]] = select [[VAL_61]], [[VAL_0]], [[VAL_59]] : f32
+ // CHECK: [[VAL_62:%.*]] = arith.select [[VAL_61]], [[VAL_0]], [[VAL_59]] : f32
// CHECK: [[VAL_63:%.*]], [[VAL_64:%.*]] = gpu.shuffle xor [[VAL_62]], [[VAL_7]], [[VAL_5]] : f32
// CHECK: [[VAL_65:%.*]] = arith.cmpf ugt, [[VAL_62]], [[VAL_63]] : f32
- // CHECK: [[VAL_66:%.*]] = select [[VAL_65]], [[VAL_62]], [[VAL_63]] : f32
+ // CHECK: [[VAL_66:%.*]] = arith.select [[VAL_65]], [[VAL_62]], [[VAL_63]] : f32
// CHECK: [[VAL_67:%.*]], [[VAL_68:%.*]] = gpu.shuffle xor [[VAL_66]], [[VAL_8]], [[VAL_5]] : f32
// CHECK: [[VAL_69:%.*]] = arith.cmpf ugt, [[VAL_66]], [[VAL_67]] : f32
- // CHECK: [[VAL_70:%.*]] = select [[VAL_69]], [[VAL_66]], [[VAL_67]] : f32
+ // CHECK: [[VAL_70:%.*]] = arith.select [[VAL_69]], [[VAL_66]], [[VAL_67]] : f32
// CHECK: [[VAL_71:%.*]], [[VAL_72:%.*]] = gpu.shuffle xor [[VAL_70]], [[VAL_9]], [[VAL_5]] : f32
// CHECK: [[VAL_73:%.*]] = arith.cmpf ugt, [[VAL_70]], [[VAL_71]] : f32
- // CHECK: [[VAL_74:%.*]] = select [[VAL_73]], [[VAL_70]], [[VAL_71]] : f32
+ // CHECK: [[VAL_74:%.*]] = arith.select [[VAL_73]], [[VAL_70]], [[VAL_71]] : f32
// CHECK: [[VAL_75:%.*]], [[VAL_76:%.*]] = gpu.shuffle xor [[VAL_74]], [[VAL_10]], [[VAL_5]] : f32
// CHECK: [[VAL_77:%.*]] = arith.cmpf ugt, [[VAL_74]], [[VAL_75]] : f32
- // CHECK: [[VAL_78:%.*]] = select [[VAL_77]], [[VAL_74]], [[VAL_75]] : f32
+ // CHECK: [[VAL_78:%.*]] = arith.select [[VAL_77]], [[VAL_74]], [[VAL_75]] : f32
// CHECK: br ^bb18([[VAL_78]] : f32)
// CHECK: ^bb18([[VAL_79:%.*]]: f32):
// CHECK: cond_br [[VAL_30]], ^bb19, ^bb20
@@ -129,7 +129,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_89]], ^bb24, ^bb25
// CHECK: ^bb24:
// CHECK: [[VAL_90:%.*]] = arith.cmpf ugt, [[VAL_86]], [[VAL_88]] : f32
- // CHECK: [[VAL_91:%.*]] = select [[VAL_90]], [[VAL_86]], [[VAL_88]] : f32
+ // CHECK: [[VAL_91:%.*]] = arith.select [[VAL_90]], [[VAL_86]], [[VAL_88]] : f32
// CHECK: br ^bb26([[VAL_91]] : f32)
// CHECK: ^bb25:
// CHECK: br ^bb26([[VAL_86]] : f32)
@@ -138,7 +138,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_94]], ^bb27, ^bb28
// CHECK: ^bb27:
// CHECK: [[VAL_95:%.*]] = arith.cmpf ugt, [[VAL_92]], [[VAL_93]] : f32
- // CHECK: [[VAL_96:%.*]] = select [[VAL_95]], [[VAL_92]], [[VAL_93]] : f32
+ // CHECK: [[VAL_96:%.*]] = arith.select [[VAL_95]], [[VAL_92]], [[VAL_93]] : f32
// CHECK: br ^bb29([[VAL_96]] : f32)
// CHECK: ^bb28:
// CHECK: br ^bb29([[VAL_92]] : f32)
@@ -147,7 +147,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_99]], ^bb30, ^bb31
// CHECK: ^bb30:
// CHECK: [[VAL_100:%.*]] = arith.cmpf ugt, [[VAL_97]], [[VAL_98]] : f32
- // CHECK: [[VAL_101:%.*]] = select [[VAL_100]], [[VAL_97]], [[VAL_98]] : f32
+ // CHECK: [[VAL_101:%.*]] = arith.select [[VAL_100]], [[VAL_97]], [[VAL_98]] : f32
// CHECK: br ^bb32([[VAL_101]] : f32)
// CHECK: ^bb31:
// CHECK: br ^bb32([[VAL_97]] : f32)
@@ -156,7 +156,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_104]], ^bb33, ^bb34
// CHECK: ^bb33:
// CHECK: [[VAL_105:%.*]] = arith.cmpf ugt, [[VAL_102]], [[VAL_103]] : f32
- // CHECK: [[VAL_106:%.*]] = select [[VAL_105]], [[VAL_102]], [[VAL_103]] : f32
+ // CHECK: [[VAL_106:%.*]] = arith.select [[VAL_105]], [[VAL_102]], [[VAL_103]] : f32
// CHECK: br ^bb35([[VAL_106]] : f32)
// CHECK: ^bb34:
// CHECK: br ^bb35([[VAL_102]] : f32)
@@ -165,7 +165,7 @@ gpu.module @kernels {
// CHECK: cond_br [[VAL_109]], ^bb36, ^bb37
// CHECK: ^bb36:
// CHECK: [[VAL_110:%.*]] = arith.cmpf ugt, [[VAL_107]], [[VAL_108]] : f32
- // CHECK: [[VAL_111:%.*]] = select [[VAL_110]], [[VAL_107]], [[VAL_108]] : f32
+ // CHECK: [[VAL_111:%.*]] = arith.select [[VAL_110]], [[VAL_107]], [[VAL_108]] : f32
// CHECK: br ^bb38([[VAL_111]] : f32)
// CHECK: ^bb37:
// CHECK: br ^bb38([[VAL_107]] : f32)
@@ -174,19 +174,19 @@ gpu.module @kernels {
// CHECK: ^bb39:
// CHECK: [[VAL_113:%.*]], [[VAL_114:%.*]] = gpu.shuffle xor [[VAL_86]], [[VAL_6]], [[VAL_5]] : f32
// CHECK: [[VAL_115:%.*]] = arith.cmpf ugt, [[VAL_86]], [[VAL_113]] : f32
- // CHECK: [[VAL_116:%.*]] = select [[VAL_115]], [[VAL_86]], [[VAL_113]] : f32
+ // CHECK: [[VAL_116:%.*]] = arith.select [[VAL_115]], [[VAL_86]], [[VAL_113]] : f32
// CHECK: [[VAL_117:%.*]], [[VAL_118:%.*]] = gpu.shuffle xor [[VAL_116]], [[VAL_7]], [[VAL_5]] : f32
// CHECK: [[VAL_119:%.*]] = arith.cmpf ugt, [[VAL_116]], [[VAL_117]] : f32
- // CHECK: [[VAL_120:%.*]] = select [[VAL_119]], [[VAL_116]], [[VAL_117]] : f32
+ // CHECK: [[VAL_120:%.*]] = arith.select [[VAL_119]], [[VAL_116]], [[VAL_117]] : f32
// CHECK: [[VAL_121:%.*]], [[VAL_122:%.*]] = gpu.shuffle xor [[VAL_120]], [[VAL_8]], [[VAL_5]] : f32
// CHECK: [[VAL_123:%.*]] = arith.cmpf ugt, [[VAL_120]], [[VAL_121]] : f32
- // CHECK: [[VAL_124:%.*]] = select [[VAL_123]], [[VAL_120]], [[VAL_121]] : f32
+ // CHECK: [[VAL_124:%.*]] = arith.select [[VAL_123]], [[VAL_120]], [[VAL_121]] : f32
// CHECK: [[VAL_125:%.*]], [[VAL_126:%.*]] = gpu.shuffle xor [[VAL_124]], [[VAL_9]], [[VAL_5]] : f32
// CHECK: [[VAL_127:%.*]] = arith.cmpf ugt, [[VAL_124]], [[VAL_125]] : f32
- // CHECK: [[VAL_128:%.*]] = select [[VAL_127]], [[VAL_124]], [[VAL_125]] : f32
+ // CHECK: [[VAL_128:%.*]] = arith.select [[VAL_127]], [[VAL_124]], [[VAL_125]] : f32
// CHECK: [[VAL_129:%.*]], [[VAL_130:%.*]] = gpu.shuffle xor [[VAL_128]], [[VAL_10]], [[VAL_5]] : f32
// CHECK: [[VAL_131:%.*]] = arith.cmpf ugt, [[VAL_128]], [[VAL_129]] : f32
- // CHECK: [[VAL_132:%.*]] = select [[VAL_131]], [[VAL_128]], [[VAL_129]] : f32
+ // CHECK: [[VAL_132:%.*]] = arith.select [[VAL_131]], [[VAL_128]], [[VAL_129]] : f32
// CHECK: br ^bb40([[VAL_132]] : f32)
// CHECK: ^bb40([[VAL_133:%.*]]: f32):
// CHECK: store [[VAL_133]], [[VAL_1]]{{\[}}[[VAL_4]]] : memref<32xf32, 3>
diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
index 9953900699367..920b04e3a1f72 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize-analysis.mlir
@@ -1724,9 +1724,9 @@ func @write_after_select_read_one(
%cst = arith.constant 0.0 : f32
%idx = arith.constant 0 : index
- // CHECK: select %{{.*}}, %[[t1]], %[[t2]]
+ // CHECK: arith.select %{{.*}}, %[[t1]], %[[t2]]
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "true"]}
- %s = std.select %c, %t1, %t2 : tensor<?xf32>
+ %s = arith.select %c, %t1, %t2 : tensor<?xf32>
// CHECK: tensor.insert
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "true", "none"]}
%w = tensor.insert %cst into %s[%idx] : tensor<?xf32>
@@ -1750,9 +1750,9 @@ func @write_after_select_read_both(
%cst = arith.constant 0.0 : f32
%idx = arith.constant 0 : index
- // CHECK: select %{{.*}}, %[[t1]], %[[t2]]
+ // CHECK: arith.select %{{.*}}, %[[t1]], %[[t2]]
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "false", "false"]}
- %s = std.select %c, %t1, %t2 : tensor<?xf32>
+ %s = arith.select %c, %t1, %t2 : tensor<?xf32>
// CHECK: tensor.insert
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "true", "none"]}
%w = tensor.insert %cst into %s[%idx] : tensor<?xf32>
@@ -1779,9 +1779,9 @@ func @write_after_select_no_conflict(
%cst = arith.constant 0.0 : f32
%idx = arith.constant 0 : index
- // CHECK: select %{{.*}}, %[[t1]], %[[t2]]
+ // CHECK: arith.select %{{.*}}, %[[t1]], %[[t2]]
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "true", "true"]}
- %s = std.select %c, %t1, %t2 : tensor<?xf32>
+ %s = arith.select %c, %t1, %t2 : tensor<?xf32>
// CHECK: tensor.insert
// CHECK-SAME: {__inplace_operands_attr__ = ["none", "true", "none"]}
%w = tensor.insert %cst into %s[%idx] : tensor<?xf32>
diff --git a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
index c8dcf748df11d..f0a48aafbcdb9 100644
--- a/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/comprehensive-module-bufferize.mlir
@@ -1009,7 +1009,7 @@ func @scf_if_non_equiv_yields(
%B : tensor<4xf32> {linalg.inplaceable = false})
-> tensor<4xf32>
{
- // CHECK: %[[r:.*]] = select %[[cond]], %[[A]], %[[B]]
+ // CHECK: %[[r:.*]] = arith.select %[[cond]], %[[A]], %[[B]]
%r = scf.if %b -> (tensor<4xf32>) {
scf.yield %A : tensor<4xf32>
} else {
@@ -1321,8 +1321,8 @@ func @write_to_select_op_source(
// CHECK: memref.copy %[[t1]], %[[alloc]]
// CHECK: memref.store %{{.*}}, %[[alloc]]
%w = tensor.insert %cst into %t1[%idx] : tensor<?xf32>
- // CHECK: %[[select:.*]] = select %{{.*}}, %[[t1]], %[[t2]]
- %s = std.select %c, %t1, %t2 : tensor<?xf32>
+ // CHECK: %[[select:.*]] = arith.select %{{.*}}, %[[t1]], %[[t2]]
+ %s = arith.select %c, %t1, %t2 : tensor<?xf32>
// CHECK: return %[[select]], %[[alloc]]
return %s, %w : tensor<?xf32>, tensor<?xf32>
}
@@ -1343,8 +1343,8 @@ func @write_after_select_read_one(
// CHECK: %[[alloc:.*]] = memref.alloc
// CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
// CHECK: memref.copy %[[t1]], %[[alloc]]
- // CHECK: %[[select:.*]] = select %{{.*}}, %[[casted]], %[[t2]]
- %s = std.select %c, %t1, %t2 : tensor<?xf32>
+ // CHECK: %[[select:.*]] = arith.select %{{.*}}, %[[casted]], %[[t2]]
+ %s = arith.select %c, %t1, %t2 : tensor<?xf32>
// CHECK: memref.store %{{.*}}, %[[select]]
%w = tensor.insert %cst into %s[%idx] : tensor<?xf32>
diff --git a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
index 394e33df321f8..cab6f5d5983bc 100644
--- a/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
+++ b/mlir/test/Dialect/Linalg/convert-elementwise-to-linalg.mlir
@@ -62,8 +62,8 @@ func @select(%arg0: tensor<i1>, %arg1: tensor<i32>, %arg2: tensor<i32>) -> tenso
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]], %[[ARG2]]
// CHECK-SAME: outs(%[[ARG1]]
// CHECK: ^bb0(%[[PRED:.*]]: i1, %[[TRUE_VAL:.*]]: i32, %[[FALSE_VAL:.*]]: i32, %{{.*}}: i32):
- // CHECK: select %[[PRED]], %[[TRUE_VAL]], %[[FALSE_VAL]] : i32
- %0 = select %arg0, %arg1, %arg2 : tensor<i1>, tensor<i32>
+ // CHECK: arith.select %[[PRED]], %[[TRUE_VAL]], %[[FALSE_VAL]] : i32
+ %0 = arith.select %arg0, %arg1, %arg2 : tensor<i1>, tensor<i32>
return %0 : tensor<i32>
}
diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir
index 4a3767b58b0f5..077d9d3421513 100644
--- a/mlir/test/Dialect/Linalg/loops.mlir
+++ b/mlir/test/Dialect/Linalg/loops.mlir
@@ -508,7 +508,7 @@ func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
%i = linalg.index 0 : index
%0 = arith.constant 0 : index
%1 = arith.cmpi eq, %0, %i : index
- %2 = select %1, %b, %c : f32
+ %2 = arith.select %1, %b, %c : f32
%3 = arith.addf %a, %2 : f32
linalg.yield %3 : f32
}
@@ -522,7 +522,7 @@ func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
// CHECK: %[[a:.*]] = memref.load %[[ARG0]][%[[i]]]
// CHECK: %[[b:.*]] = memref.load %[[ARG1]][]
// CHECK: %[[c:.*]] = memref.load %[[ARG2]][]
-// CHECK: %[[d:.*]] = select %{{.*}}, %[[b]], %[[c]]
+// CHECK: %[[d:.*]] = arith.select %{{.*}}, %[[b]], %[[c]]
// CHECK: %[[e:.*]] = arith.addf %[[a]], %[[d]]
// CHECK: store %[[e]], %[[ARG2]][]
@@ -534,7 +534,7 @@ func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
// CHECKPARALLEL: %[[a:.*]] = memref.load %[[ARG0]][%[[i]]]
// CHECKPARALLEL: %[[b:.*]] = memref.load %[[ARG1]][]
// CHECKPARALLEL: %[[c:.*]] = memref.load %[[ARG2]][]
-// CHECKPARALLEL: %[[d:.*]] = select %{{.*}}, %[[b]], %[[c]]
+// CHECKPARALLEL: %[[d:.*]] = arith.select %{{.*}}, %[[b]], %[[c]]
// CHECKPARALLEL: %[[e:.*]] = arith.addf %[[a]], %[[d]]
// CHECKPARALLEL: store %[[e]], %[[ARG2]][]
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 3c9e338b91af0..0c70868cbc32c 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -220,12 +220,12 @@ func @generic_with_multiple_tensor_outputs(
outs(%1, %3 : tensor<i32>, tensor<i32>) {
^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32):
%5 = arith.cmpi sge, %arg3, %arg5 : i32
- %6 = select %5, %arg3, %arg5 : i32
+ %6 = arith.select %5, %arg3, %arg5 : i32
%7 = arith.cmpi eq, %arg3, %arg5 : i32
%8 = arith.cmpi slt, %arg4, %arg6 : i32
- %9 = select %8, %arg4, %arg6 : i32
- %10 = select %5, %arg4, %arg6 : i32
- %11 = select %7, %9, %10 : i32
+ %9 = arith.select %8, %arg4, %arg6 : i32
+ %10 = arith.select %5, %arg4, %arg6 : i32
+ %11 = arith.select %7, %9, %10 : i32
linalg.yield %6, %11 : i32, i32
} -> (tensor<i32>, tensor<i32>)
return %4#0, %4#1 : tensor<i32>, tensor<i32>
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 3fe0c7a06a12f..49ec6e7033281 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -328,8 +328,8 @@ func @generic_vectorize(%arg0: memref<4x256xf32>,
%11 = arith.mulf %arg5, %8 : f32
// CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32>
%12 = math.rsqrt %arg5 : f32
- // CHECK: %[[SEL:.*]] = select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
- %13 = select %7, %arg5, %arg6 : f32
+ // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
+ %13 = arith.select %7, %arg5, %arg6 : f32
// CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32>
%14 = arith.subf %arg5, %arg4 : f32
// CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32>
@@ -406,8 +406,8 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
%11 = arith.mulf %arg5, %8 : f32
// CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32>
%12 = math.rsqrt %arg5 : f32
- // CHECK: %[[SEL:.*]] = select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
- %13 = select %7, %arg5, %arg6 : f32
+ // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32>
+ %13 = arith.select %7, %arg5, %arg6 : f32
// CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32>
%14 = arith.subf %arg5, %arg4 : f32
// CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32>
diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir
index a40cc9c6f037a..8655c1cf61b87 100644
--- a/mlir/test/Dialect/Math/polynomial-approximation.mlir
+++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir
@@ -39,27 +39,27 @@
// CHECK-DAG: %[[val_cst_28:.*]] = arith.constant 3.750000e+00 : f32
// CHECK: %[[val_0:.*]] = arith.cmpf olt, %[[val_arg0]], %[[val_cst]] : f32
// CHECK: %[[val_1:.*]] = arith.negf %[[val_arg0]] : f32
-// CHECK: %[[val_2:.*]] = select %[[val_0]], %[[val_1]], %[[val_arg0]] : f32
+// CHECK: %[[val_2:.*]] = arith.select %[[val_0]], %[[val_1]], %[[val_arg0]] : f32
// CHECK: %[[val_3:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_26]] : f32
-// CHECK: %[[val_4:.*]] = select %[[val_3]], %[[val_cst_1]], %[[val_cst_5]] : f32
-// CHECK: %[[val_5:.*]] = select %[[val_3]], %[[val_cst_14]], %[[val_cst_18]] : f32
-// CHECK: %[[val_6:.*]] = select %[[val_3]], %[[val_cst_2]], %[[val_cst_6]] : f32
-// CHECK: %[[val_7:.*]] = select %[[val_3]], %[[val_cst_15]], %[[val_cst_19]] : f32
-// CHECK: %[[val_8:.*]] = select %[[val_3]], %[[val_cst_3]], %[[val_cst_7]] : f32
-// CHECK: %[[val_9:.*]] = select %[[val_3]], %[[val_cst_16]], %[[val_cst_20]] : f32
-// CHECK: %[[val_10:.*]] = select %[[val_3]], %[[val_cst_4]], %[[val_cst_8]] : f32
-// CHECK: %[[val_11:.*]] = select %[[val_3]], %[[val_cst_17]], %[[val_cst_21]] : f32
+// CHECK: %[[val_4:.*]] = arith.select %[[val_3]], %[[val_cst_1]], %[[val_cst_5]] : f32
+// CHECK: %[[val_5:.*]] = arith.select %[[val_3]], %[[val_cst_14]], %[[val_cst_18]] : f32
+// CHECK: %[[val_6:.*]] = arith.select %[[val_3]], %[[val_cst_2]], %[[val_cst_6]] : f32
+// CHECK: %[[val_7:.*]] = arith.select %[[val_3]], %[[val_cst_15]], %[[val_cst_19]] : f32
+// CHECK: %[[val_8:.*]] = arith.select %[[val_3]], %[[val_cst_3]], %[[val_cst_7]] : f32
+// CHECK: %[[val_9:.*]] = arith.select %[[val_3]], %[[val_cst_16]], %[[val_cst_20]] : f32
+// CHECK: %[[val_10:.*]] = arith.select %[[val_3]], %[[val_cst_4]], %[[val_cst_8]] : f32
+// CHECK: %[[val_11:.*]] = arith.select %[[val_3]], %[[val_cst_17]], %[[val_cst_21]] : f32
// CHECK: %[[val_12:.*]] = arith.cmpf olt, %[[val_2]], %[[val_cst_27]] : f32
-// CHECK: %[[val_13:.*]] = select %[[val_12]], %[[val_cst]], %[[val_cst_9]] : f32
-// CHECK: %[[val_14:.*]] = select %[[val_12]], %[[val_4]], %[[val_cst_10]] : f32
-// CHECK: %[[val_15:.*]] = select %[[val_12]], %[[val_5]], %[[val_cst_22]] : f32
-// CHECK: %[[val_16:.*]] = select %[[val_12]], %[[val_6]], %[[val_cst_11]] : f32
-// CHECK: %[[val_17:.*]] = select %[[val_12]], %[[val_7]], %[[val_cst_23]] : f32
-// CHECK: %[[val_18:.*]] = select %[[val_12]], %[[val_8]], %[[val_cst_12]] : f32
-// CHECK: %[[val_19:.*]] = select %[[val_12]], %[[val_9]], %[[val_cst_24]] : f32
-// CHECK: %[[val_20:.*]] = select %[[val_12]], %[[val_10]], %[[val_cst_13]] : f32
-// CHECK: %[[val_21:.*]] = select %[[val_12]], %[[val_11]], %[[val_cst_25]] : f32
-// CHECK: %[[val_22:.*]] = select %[[val_12]], %[[val_cst]], %[[val_cst_0]] : f32
+// CHECK: %[[val_13:.*]] = arith.select %[[val_12]], %[[val_cst]], %[[val_cst_9]] : f32
+// CHECK: %[[val_14:.*]] = arith.select %[[val_12]], %[[val_4]], %[[val_cst_10]] : f32
+// CHECK: %[[val_15:.*]] = arith.select %[[val_12]], %[[val_5]], %[[val_cst_22]] : f32
+// CHECK: %[[val_16:.*]] = arith.select %[[val_12]], %[[val_6]], %[[val_cst_11]] : f32
+// CHECK: %[[val_17:.*]] = arith.select %[[val_12]], %[[val_7]], %[[val_cst_23]] : f32
+// CHECK: %[[val_18:.*]] = arith.select %[[val_12]], %[[val_8]], %[[val_cst_12]] : f32
+// CHECK: %[[val_19:.*]] = arith.select %[[val_12]], %[[val_9]], %[[val_cst_24]] : f32
+// CHECK: %[[val_20:.*]] = arith.select %[[val_12]], %[[val_10]], %[[val_cst_13]] : f32
+// CHECK: %[[val_21:.*]] = arith.select %[[val_12]], %[[val_11]], %[[val_cst_25]] : f32
+// CHECK: %[[val_22:.*]] = arith.select %[[val_12]], %[[val_cst]], %[[val_cst_0]] : f32
// CHECK: %[[val_23:.*]] = arith.cmpf ult, %[[val_2]], %[[val_cst_28]] : f32
// CHECK: %[[val_24:.*]] = math.fma %[[val_2]], %[[val_20]], %[[val_18]] : f32
// CHECK: %[[val_25:.*]] = math.fma %[[val_2]], %[[val_24]], %[[val_16]] : f32
@@ -71,9 +71,9 @@
// CHECK: %[[val_31:.*]] = math.fma %[[val_2]], %[[val_30]], %[[val_cst_0]] : f32
// CHECK: %[[val_32:.*]] = arith.divf %[[val_27]], %[[val_31]] : f32
// CHECK: %[[val_33:.*]] = arith.addf %[[val_22]], %[[val_32]] : f32
-// CHECK: %[[val_34:.*]] = select %[[val_23]], %[[val_33]], %[[val_cst_0]] : f32
+// CHECK: %[[val_34:.*]] = arith.select %[[val_23]], %[[val_33]], %[[val_cst_0]] : f32
// CHECK: %[[val_35:.*]] = arith.negf %[[val_34]] : f32
-// CHECK: %[[val_36:.*]] = select %[[val_0]], %[[val_35]], %[[val_34]] : f32
+// CHECK: %[[val_36:.*]] = arith.select %[[val_0]], %[[val_35]], %[[val_34]] : f32
// CHECK: return %[[val_36]] : f32
// CHECK: }
func @erf_scalar(%arg0: f32) -> f32 {
@@ -86,7 +86,7 @@ func @erf_scalar(%arg0: f32) -> f32 {
// CHECK: %[[zero:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32>
// CHECK-NOT: erf
// CHECK-COUNT-20: select
-// CHECK: %[[res:.*]] = select
+// CHECK: %[[res:.*]] = arith.select
// CHECK: return %[[res]] : vector<8xf32>
// CHECK: }
func @erf_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -132,10 +132,10 @@ func @erf_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_34:.*]] = arith.cmpf oeq, %[[VAL_0]], %[[VAL_10]] : f32
// CHECK: %[[VAL_35:.*]] = arith.cmpf ogt, %[[VAL_0]], %[[VAL_9]] : f32
// CHECK: %[[VAL_36:.*]] = arith.andi %[[VAL_31]], %[[VAL_32]] : i1
-// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_10]], %[[VAL_12]] : f32
-// CHECK: %[[VAL_38:.*]] = select %[[VAL_36]], %[[VAL_30]], %[[VAL_37]] : f32
-// CHECK: %[[VAL_39:.*]] = select %[[VAL_34]], %[[VAL_10]], %[[VAL_38]] : f32
-// CHECK: %[[VAL_40:.*]] = select %[[VAL_33]], %[[VAL_9]], %[[VAL_39]] : f32
+// CHECK: %[[VAL_37:.*]] = arith.select %[[VAL_35]], %[[VAL_10]], %[[VAL_12]] : f32
+// CHECK: %[[VAL_38:.*]] = arith.select %[[VAL_36]], %[[VAL_30]], %[[VAL_37]] : f32
+// CHECK: %[[VAL_39:.*]] = arith.select %[[VAL_34]], %[[VAL_10]], %[[VAL_38]] : f32
+// CHECK: %[[VAL_40:.*]] = arith.select %[[VAL_33]], %[[VAL_9]], %[[VAL_39]] : f32
// CHECK: return %[[VAL_40]] : f32
func @exp_scalar(%arg0: f32) -> f32 {
%0 = math.exp %arg0 : f32
@@ -147,7 +147,7 @@ func @exp_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_1:.*]] = arith.constant dense<0.693147182> : vector<8xf32>
// CHECK-NOT: exp
// CHECK-COUNT-3: select
-// CHECK: %[[VAL_40:.*]] = select
+// CHECK: %[[VAL_40:.*]] = arith.select
// CHECK: return %[[VAL_40]] : vector<8xf32>
func @exp_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
%0 = math.exp %arg0 : vector<8xf32>
@@ -162,19 +162,19 @@ func @exp_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[BEGIN_EXP_X:.*]] = arith.mulf %[[X]], %[[CST_LOG2E]] : f32
// CHECK-NOT: exp
// CHECK-COUNT-3: select
-// CHECK: %[[EXP_X:.*]] = select
+// CHECK: %[[EXP_X:.*]] = arith.select
// CHECK: %[[VAL_58:.*]] = arith.cmpf oeq, %[[EXP_X]], %[[CST_ONE]] : f32
// CHECK: %[[VAL_59:.*]] = arith.subf %[[EXP_X]], %[[CST_ONE]] : f32
// CHECK: %[[VAL_60:.*]] = arith.cmpf oeq, %[[VAL_59]], %[[CST_MINUSONE]] : f32
// CHECK-NOT: log
// CHECK-COUNT-5: select
-// CHECK: %[[LOG_U:.*]] = select
+// CHECK: %[[LOG_U:.*]] = arith.select
// CHECK: %[[VAL_104:.*]] = arith.cmpf oeq, %[[LOG_U]], %[[EXP_X]] : f32
// CHECK: %[[VAL_105:.*]] = arith.divf %[[X]], %[[LOG_U]] : f32
// CHECK: %[[VAL_106:.*]] = arith.mulf %[[VAL_59]], %[[VAL_105]] : f32
-// CHECK: %[[VAL_107:.*]] = select %[[VAL_104]], %[[EXP_X]], %[[VAL_106]] : f32
-// CHECK: %[[VAL_108:.*]] = select %[[VAL_60]], %[[CST_MINUSONE]], %[[VAL_107]] : f32
-// CHECK: %[[VAL_109:.*]] = select %[[VAL_58]], %[[X]], %[[VAL_108]] : f32
+// CHECK: %[[VAL_107:.*]] = arith.select %[[VAL_104]], %[[EXP_X]], %[[VAL_106]] : f32
+// CHECK: %[[VAL_108:.*]] = arith.select %[[VAL_60]], %[[CST_MINUSONE]], %[[VAL_107]] : f32
+// CHECK: %[[VAL_109:.*]] = arith.select %[[VAL_58]], %[[X]], %[[VAL_108]] : f32
// CHECK: return %[[VAL_109]] : f32
// CHECK: }
func @expm1_scalar(%arg0: f32) -> f32 {
@@ -191,7 +191,7 @@ func @expm1_scalar(%arg0: f32) -> f32 {
// CHECK-COUNT-5: select
// CHECK-NOT: expm1
// CHECK-COUNT-3: select
-// CHECK: %[[VAL_115:.*]] = select
+// CHECK: %[[VAL_115:.*]] = arith.select
// CHECK: return %[[VAL_115]] : vector<8x8xf32>
// CHECK: }
func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
@@ -224,7 +224,7 @@ func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
// CHECK: %[[VAL_21:.*]] = arith.constant 23 : i32
// CHECK: %[[VAL_22:.*]] = arith.constant 0.693147182 : f32
// CHECK: %[[VAL_23:.*]] = arith.cmpf ogt, %[[X]], %[[VAL_4]] : f32
-// CHECK: %[[VAL_24:.*]] = select %[[VAL_23]], %[[X]], %[[VAL_4]] : f32
+// CHECK: %[[VAL_24:.*]] = arith.select %[[VAL_23]], %[[X]], %[[VAL_4]] : f32
// CHECK-NOT: frexp
// CHECK: %[[VAL_25:.*]] = arith.bitcast %[[VAL_24]] : f32 to i32
// CHECK: %[[VAL_26:.*]] = arith.andi %[[VAL_25]], %[[VAL_19]] : i32
@@ -235,9 +235,9 @@ func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
// CHECK: %[[VAL_31:.*]] = arith.sitofp %[[VAL_30]] : i32 to f32
// CHECK: %[[VAL_32:.*]] = arith.subf %[[VAL_31]], %[[VAL_18]] : f32
// CHECK: %[[VAL_33:.*]] = arith.cmpf olt, %[[VAL_28]], %[[VAL_8]] : f32
-// CHECK: %[[VAL_34:.*]] = select %[[VAL_33]], %[[VAL_28]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_34:.*]] = arith.select %[[VAL_33]], %[[VAL_28]], %[[VAL_1]] : f32
// CHECK: %[[VAL_35:.*]] = arith.subf %[[VAL_28]], %[[VAL_2]] : f32
-// CHECK: %[[VAL_36:.*]] = select %[[VAL_33]], %[[VAL_2]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_36:.*]] = arith.select %[[VAL_33]], %[[VAL_2]], %[[VAL_1]] : f32
// CHECK: %[[VAL_37:.*]] = arith.subf %[[VAL_32]], %[[VAL_36]] : f32
// CHECK: %[[VAL_38:.*]] = arith.addf %[[VAL_35]], %[[VAL_34]] : f32
// CHECK: %[[VAL_39:.*]] = arith.mulf %[[VAL_38]], %[[VAL_38]] : f32
@@ -257,9 +257,9 @@ func @expm1_vector(%arg0: vector<8x8xf32>) -> vector<8x8xf32> {
// CHECK: %[[VAL_53:.*]] = arith.cmpf ult, %[[X]], %[[VAL_1]] : f32
// CHECK: %[[VAL_54:.*]] = arith.cmpf oeq, %[[X]], %[[VAL_1]] : f32
// CHECK: %[[VAL_55:.*]] = arith.cmpf oeq, %[[X]], %[[VAL_6]] : f32
-// CHECK: %[[VAL_56:.*]] = select %[[VAL_55]], %[[VAL_6]], %[[VAL_52]] : f32
-// CHECK: %[[VAL_57:.*]] = select %[[VAL_53]], %[[VAL_7]], %[[VAL_56]] : f32
-// CHECK: %[[VAL_58:.*]] = select %[[VAL_54]], %[[VAL_5]], %[[VAL_57]] : f32
+// CHECK: %[[VAL_56:.*]] = arith.select %[[VAL_55]], %[[VAL_6]], %[[VAL_52]] : f32
+// CHECK: %[[VAL_57:.*]] = arith.select %[[VAL_53]], %[[VAL_7]], %[[VAL_56]] : f32
+// CHECK: %[[VAL_58:.*]] = arith.select %[[VAL_54]], %[[VAL_5]], %[[VAL_57]] : f32
// CHECK: return %[[VAL_58]] : f32
// CHECK: }
func @log_scalar(%arg0: f32) -> f32 {
@@ -271,7 +271,7 @@ func @log_scalar(%arg0: f32) -> f32 {
// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[CST_LN2:.*]] = arith.constant dense<0.693147182> : vector<8xf32>
// CHECK-COUNT-5: select
-// CHECK: %[[VAL_71:.*]] = select
+// CHECK: %[[VAL_71:.*]] = arith.select
// CHECK: return %[[VAL_71]] : vector<8xf32>
// CHECK: }
func @log_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -283,7 +283,7 @@ func @log_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK-SAME: %[[VAL_0:.*]]: f32) -> f32 {
// CHECK: %[[CST_LOG2E:.*]] = arith.constant 1.44269502 : f32
// CHECK-COUNT-5: select
-// CHECK: %[[VAL_65:.*]] = select
+// CHECK: %[[VAL_65:.*]] = arith.select
// CHECK: return %[[VAL_65]] : f32
// CHECK: }
func @log2_scalar(%arg0: f32) -> f32 {
@@ -295,7 +295,7 @@ func @log2_scalar(%arg0: f32) -> f32 {
// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[CST_LOG2E:.*]] = arith.constant dense<1.44269502> : vector<8xf32>
// CHECK-COUNT-5: select
-// CHECK: %[[VAL_71:.*]] = select
+// CHECK: %[[VAL_71:.*]] = arith.select
// CHECK: return %[[VAL_71]] : vector<8xf32>
// CHECK: }
func @log2_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -310,13 +310,13 @@ func @log2_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[U_SMALL:.*]] = arith.cmpf oeq, %[[U]], %[[CST_ONE]] : f32
// CHECK-NOT: log
// CHECK-COUNT-5: select
-// CHECK: %[[LOG_U:.*]] = select
+// CHECK: %[[LOG_U:.*]] = arith.select
// CHECK: %[[U_INF:.*]] = arith.cmpf oeq, %[[U]], %[[LOG_U]] : f32
// CHECK: %[[VAL_69:.*]] = arith.subf %[[U]], %[[CST_ONE]] : f32
// CHECK: %[[VAL_70:.*]] = arith.divf %[[LOG_U]], %[[VAL_69]] : f32
// CHECK: %[[LOG_LARGE:.*]] = arith.mulf %[[X]], %[[VAL_70]] : f32
// CHECK: %[[VAL_72:.*]] = arith.ori %[[U_SMALL]], %[[U_INF]] : i1
-// CHECK: %[[APPROX:.*]] = select %[[VAL_72]], %[[X]], %[[LOG_LARGE]] : f32
+// CHECK: %[[APPROX:.*]] = arith.select %[[VAL_72]], %[[X]], %[[LOG_LARGE]] : f32
// CHECK: return %[[APPROX]] : f32
// CHECK: }
func @log1p_scalar(%arg0: f32) -> f32 {
@@ -328,7 +328,7 @@ func @log1p_scalar(%arg0: f32) -> f32 {
// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[CST_ONE:.*]] = arith.constant dense<1.000000e+00> : vector<8xf32>
// CHECK-COUNT-6: select
-// CHECK: %[[VAL_79:.*]] = select
+// CHECK: %[[VAL_79:.*]] = arith.select
// CHECK: return %[[VAL_79]] : vector<8xf32>
// CHECK: }
func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -354,9 +354,9 @@ func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_13:.*]] = arith.constant 1.18534706E-4 : f32
// CHECK: %[[VAL_14:.*]] = arith.constant 1.19825836E-6 : f32
// CHECK: %[[VAL_15:.*]] = arith.cmpf olt, %[[VAL_0]], %[[VAL_2]] : f32
-// CHECK: %[[VAL_16:.*]] = select %[[VAL_15]], %[[VAL_0]], %[[VAL_2]] : f32
+// CHECK: %[[VAL_16:.*]] = arith.select %[[VAL_15]], %[[VAL_0]], %[[VAL_2]] : f32
// CHECK: %[[VAL_17:.*]] = arith.cmpf ogt, %[[VAL_16]], %[[VAL_1]] : f32
-// CHECK: %[[VAL_18:.*]] = select %[[VAL_17]], %[[VAL_16]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_16]], %[[VAL_1]] : f32
// CHECK: %[[VAL_19:.*]] = math.abs %[[VAL_0]] : f32
// CHECK: %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_19]], %[[VAL_3]] : f32
// CHECK: %[[VAL_21:.*]] = arith.mulf %[[VAL_18]], %[[VAL_18]] : f32
@@ -371,7 +371,7 @@ func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_30:.*]] = math.fma %[[VAL_21]], %[[VAL_29]], %[[VAL_12]] : f32
// CHECK: %[[VAL_31:.*]] = math.fma %[[VAL_21]], %[[VAL_30]], %[[VAL_11]] : f32
// CHECK: %[[VAL_32:.*]] = arith.divf %[[VAL_28]], %[[VAL_31]] : f32
-// CHECK: %[[VAL_33:.*]] = select %[[VAL_20]], %[[VAL_18]], %[[VAL_32]] : f32
+// CHECK: %[[VAL_33:.*]] = arith.select %[[VAL_20]], %[[VAL_18]], %[[VAL_32]] : f32
// CHECK: return %[[VAL_33]] : f32
// CHECK: }
func @tanh_scalar(%arg0: f32) -> f32 {
@@ -384,7 +384,7 @@ func @tanh_scalar(%arg0: f32) -> f32 {
// CHECK: %[[VAL_1:.*]] = arith.constant dense<-7.99881172> : vector<8xf32>
// CHECK-NOT: tanh
// CHECK-COUNT-2: select
-// CHECK: %[[VAL_33:.*]] = select
+// CHECK: %[[VAL_33:.*]] = arith.select
// CHECK: return %[[VAL_33]] : vector<8xf32>
// CHECK: }
func @tanh_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -418,7 +418,7 @@ func @rsqrt_scalar(%arg0: f32) -> f32 {
// AVX2: %[[VAL_10:.*]] = arith.mulf %[[VAL_5]], %[[VAL_9]] : vector<8xf32>
// AVX2: %[[VAL_11:.*]] = math.fma %[[VAL_9]], %[[VAL_10]], %[[VAL_2]] : vector<8xf32>
// AVX2: %[[VAL_12:.*]] = arith.mulf %[[VAL_9]], %[[VAL_11]] : vector<8xf32>
-// AVX2: %[[VAL_13:.*]] = select %[[VAL_8]], %[[VAL_9]], %[[VAL_12]] : vector<8xi1>, vector<8xf32>
+// AVX2: %[[VAL_13:.*]] = arith.select %[[VAL_8]], %[[VAL_9]], %[[VAL_12]] : vector<8xi1>, vector<8xf32>
// AVX2: return %[[VAL_13]] : vector<8xf32>
// AVX2: }
func @rsqrt_vector_8xf32(%arg0: vector<8xf32>) -> vector<8xf32> {
@@ -518,13 +518,13 @@ func @rsqrt_vector_2x16xf32(%arg0: vector<2x16xf32>) -> vector<2x16xf32> {
// CHECK-DAG: %[[ABS:.+]] = math.abs %arg0
// CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]]
// CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]]
-// CHECK-DAG: %[[SEL:.+]] = select %[[CMP]], %[[ABS]], %[[DIV]]
+// CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]]
// CHECK-DAG: %[[P0:.+]] = math.fma %[[SEL]], %[[N1]], %[[N2]]
// CHECK-DAG: %[[P1:.+]] = math.fma %[[SEL]], %[[P0]], %[[N3]]
// CHECK-DAG: %[[P2:.+]] = math.fma %[[SEL]], %[[P1]], %[[N4]]
// CHECK-DAG: %[[P3:.+]] = arith.mulf %[[SEL]], %[[P2]]
// CHECK-DAG: %[[SUB:.+]] = arith.subf %[[HALF_PI]], %[[P3]]
-// CHECK-DAG: %[[EST:.+]] = select %[[CMP]], %[[P3]], %[[SUB]]
+// CHECK-DAG: %[[EST:.+]] = arith.select %[[CMP]], %[[P3]], %[[SUB]]
// CHECK-DAG: %[[RES:.+]] = math.copysign %[[EST]], %arg0
// CHECK: return %[[RES]]
func @atan_scalar(%arg0: f32) -> f32 {
@@ -546,13 +546,13 @@ func @atan_scalar(%arg0: f32) -> f32 {
// CHECK-DAG: %[[ABS:.+]] = math.abs %[[RATIO]]
// CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]]
// CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]]
-// CHECK-DAG: %[[SEL:.+]] = select %[[CMP]], %[[ABS]], %[[DIV]]
+// CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]]
// CHECK-DAG: %[[P0:.+]] = math.fma %[[SEL]], %[[N1]], %[[N2]]
// CHECK-DAG: %[[P1:.+]] = math.fma %[[SEL]], %[[P0]], %[[N3]]
// CHECK-DAG: %[[P2:.+]] = math.fma %[[SEL]], %[[P1]], %[[N4]]
// CHECK-DAG: %[[P3:.+]] = arith.mulf %[[SEL]], %[[P2]]
// CHECK-DAG: %[[SUB:.+]] = arith.subf %[[HALF_PI]], %[[P3]]
-// CHECK-DAG: %[[EST:.+]] = select %[[CMP]], %[[P3]], %[[SUB]]
+// CHECK-DAG: %[[EST:.+]] = arith.select %[[CMP]], %[[P3]], %[[SUB]]
// CHECK-DAG: %[[ATAN:.+]] = math.copysign %[[EST]], %[[RATIO]]
// Handle the case of x < 0:
@@ -561,27 +561,27 @@ func @atan_scalar(%arg0: f32) -> f32 {
// CHECK-DAG: %[[ADD_PI:.+]] = arith.addf %[[ATAN]], %[[PI]]
// CHECK-DAG: %[[SUB_PI:.+]] = arith.subf %[[ATAN]], %[[PI]]
// CHECK-DAG: %[[CMP_ATAN:.+]] = arith.cmpf ogt, %[[ATAN]], %[[ZERO]]
-// CHECK-DAG: %[[ATAN_ADJUST:.+]] = select %[[CMP_ATAN]], %[[SUB_PI]], %[[ADD_PI]]
+// CHECK-DAG: %[[ATAN_ADJUST:.+]] = arith.select %[[CMP_ATAN]], %[[SUB_PI]], %[[ADD_PI]]
// CHECK-DAG: %[[X_NEG:.+]] = arith.cmpf ogt, %arg1, %[[ZERO]]
-// CHECK-DAG: %[[ATAN_EST:.+]] = select %[[X_NEG]], %[[ATAN]], %[[ATAN_ADJUST]]
+// CHECK-DAG: %[[ATAN_EST:.+]] = arith.select %[[X_NEG]], %[[ATAN]], %[[ATAN_ADJUST]]
// Handle PI / 2 edge case:
// CHECK-DAG: %[[X_ZERO:.+]] = arith.cmpf oeq, %arg1, %[[ZERO]]
// CHECK-DAG: %[[Y_POS:.+]] = arith.cmpf ogt, %arg0, %[[ZERO]]
// CHECK-DAG: %[[IS_HALF_PI:.+]] = arith.andi %[[X_ZERO]], %[[Y_POS]]
-// CHECK-DAG: %[[EDGE1:.+]] = select %[[IS_HALF_PI]], %[[HALF_PI]], %[[ATAN_EST]]
+// CHECK-DAG: %[[EDGE1:.+]] = arith.select %[[IS_HALF_PI]], %[[HALF_PI]], %[[ATAN_EST]]
// Handle -PI / 2 edge case:
// CHECK-DAG: %[[NEG_HALF_PI:.+]] = arith.constant -1.57079637
// CHECK-DAG: %[[Y_NEG:.+]] = arith.cmpf olt, %arg0, %[[ZERO]]
// CHECK-DAG: %[[IS_NEG_HALF_PI:.+]] = arith.andi %[[X_ZERO]], %[[Y_NEG]]
-// CHECK-DAG: %[[EDGE2:.+]] = select %[[IS_NEG_HALF_PI]], %[[NEG_HALF_PI]], %[[EDGE1]]
+// CHECK-DAG: %[[EDGE2:.+]] = arith.select %[[IS_NEG_HALF_PI]], %[[NEG_HALF_PI]], %[[EDGE1]]
// Handle Nan edgecase:
// CHECK-DAG: %[[Y_ZERO:.+]] = arith.cmpf oeq, %arg0, %[[ZERO]]
// CHECK-DAG: %[[X_Y_ZERO:.+]] = arith.andi %[[X_ZERO]], %[[Y_ZERO]]
// CHECK-DAG: %[[NAN:.+]] = arith.constant 0x7FC00000
-// CHECK-DAG: %[[EDGE3:.+]] = select %[[X_Y_ZERO]], %[[NAN]], %[[EDGE2]]
+// CHECK-DAG: %[[EDGE3:.+]] = arith.select %[[X_Y_ZERO]], %[[NAN]], %[[EDGE2]]
// CHECK: return %[[EDGE3]]
func @atan2_scalar(%arg0: f32, %arg1: f32) -> f32 {
diff --git a/mlir/test/Dialect/MemRef/expand-ops.mlir b/mlir/test/Dialect/MemRef/expand-ops.mlir
index bcf83042184f3..261e95f881ba2 100644
--- a/mlir/test/Dialect/MemRef/expand-ops.mlir
+++ b/mlir/test/Dialect/MemRef/expand-ops.mlir
@@ -9,7 +9,7 @@ func @atomic_rmw_to_generic(%F: memref<10xf32>, %f: f32, %i: index) -> f32 {
// CHECK: %0 = memref.generic_atomic_rmw %arg0[%arg2] : memref<10xf32> {
// CHECK: ^bb0([[CUR_VAL:%.*]]: f32):
// CHECK: [[CMP:%.*]] = arith.cmpf ogt, [[CUR_VAL]], [[f]] : f32
-// CHECK: [[SELECT:%.*]] = select [[CMP]], [[CUR_VAL]], [[f]] : f32
+// CHECK: [[SELECT:%.*]] = arith.select [[CMP]], [[CUR_VAL]], [[f]] : f32
// CHECK: memref.atomic_yield [[SELECT]] : f32
// CHECK: }
// CHECK: return %0 : f32
diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir
index 8c00612920bbc..a9ad591d31efa 100644
--- a/mlir/test/Dialect/SCF/canonicalize.mlir
+++ b/mlir/test/Dialect/SCF/canonicalize.mlir
@@ -280,7 +280,7 @@ func @to_select1(%cond: i1) -> index {
// CHECK-LABEL: func @to_select1
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK: [[V0:%.*]] = select {{.*}}, [[C0]], [[C1]]
+// CHECK: [[V0:%.*]] = arith.select {{.*}}, [[C0]], [[C1]]
// CHECK: return [[V0]] : index
// -----
@@ -299,7 +299,7 @@ func @to_select_same_val(%cond: i1) -> (index, index) {
// CHECK-LABEL: func @to_select_same_val
// CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
-// CHECK: [[V0:%.*]] = select {{.*}}, [[C0]], [[C1]]
+// CHECK: [[V0:%.*]] = arith.select {{.*}}, [[C0]], [[C1]]
// CHECK: return [[V0]], [[C1]] : index, index
// -----
@@ -322,8 +322,8 @@ func @to_select2(%cond: i1) -> (index, index) {
// CHECK-DAG: [[C1:%.*]] = arith.constant 1 : index
// CHECK-DAG: [[C2:%.*]] = arith.constant 2 : index
// CHECK-DAG: [[C3:%.*]] = arith.constant 3 : index
-// CHECK: [[V0:%.*]] = select {{.*}}, [[C0]], [[C2]]
-// CHECK: [[V1:%.*]] = select {{.*}}, [[C1]], [[C3]]
+// CHECK: [[V0:%.*]] = arith.select {{.*}}, [[C0]], [[C2]]
+// CHECK: [[V1:%.*]] = arith.select {{.*}}, [[C1]], [[C3]]
// CHECK: return [[V0]], [[V1]] : index
// -----
diff --git a/mlir/test/Dialect/SCF/ops.mlir b/mlir/test/Dialect/SCF/ops.mlir
index 0321479553768..31bb1290bcb66 100644
--- a/mlir/test/Dialect/SCF/ops.mlir
+++ b/mlir/test/Dialect/SCF/ops.mlir
@@ -8,9 +8,9 @@ func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
scf.for %i0 = %arg0 to %arg1 step %arg2 {
scf.for %i1 = %arg0 to %arg1 step %arg2 {
%min_cmp = arith.cmpi slt, %i0, %i1 : index
- %min = select %min_cmp, %i0, %i1 : index
+ %min = arith.select %min_cmp, %i0, %i1 : index
%max_cmp = arith.cmpi sge, %i0, %i1 : index
- %max = select %max_cmp, %i0, %i1 : index
+ %max = arith.select %max_cmp, %i0, %i1 : index
scf.for %i2 = %min to %max step %i1 {
}
}
@@ -21,9 +21,9 @@ func @std_for(%arg0 : index, %arg1 : index, %arg2 : index) {
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
// CHECK-NEXT: %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} : index
-// CHECK-NEXT: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: %{{.*}} = arith.cmpi sge, %{{.*}}, %{{.*}} : index
-// CHECK-NEXT: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : index
+// CHECK-NEXT: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : index
// CHECK-NEXT: scf.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} {
func @std_if(%arg0: i1, %arg1: f32) {
@@ -56,9 +56,9 @@ func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
step (%arg4, %step) {
%min_cmp = arith.cmpi slt, %i0, %i1 : index
- %min = select %min_cmp, %i0, %i1 : index
+ %min = arith.select %min_cmp, %i0, %i1 : index
%max_cmp = arith.cmpi sge, %i0, %i1 : index
- %max = select %max_cmp, %i0, %i1 : index
+ %max = arith.select %max_cmp, %i0, %i1 : index
%zero = arith.constant 0.0 : f32
%int_zero = arith.constant 0 : i32
%red:2 = scf.parallel (%i2) = (%min) to (%max) step (%i1)
@@ -89,9 +89,9 @@ func @std_parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK-NEXT: scf.parallel (%[[I0:.*]], %[[I1:.*]]) = (%[[ARG0]], %[[ARG1]]) to
// CHECK: (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[STEP]]) {
// CHECK-NEXT: %[[MIN_CMP:.*]] = arith.cmpi slt, %[[I0]], %[[I1]] : index
-// CHECK-NEXT: %[[MIN:.*]] = select %[[MIN_CMP]], %[[I0]], %[[I1]] : index
+// CHECK-NEXT: %[[MIN:.*]] = arith.select %[[MIN_CMP]], %[[I0]], %[[I1]] : index
// CHECK-NEXT: %[[MAX_CMP:.*]] = arith.cmpi sge, %[[I0]], %[[I1]] : index
-// CHECK-NEXT: %[[MAX:.*]] = select %[[MAX_CMP]], %[[I0]], %[[I1]] : index
+// CHECK-NEXT: %[[MAX:.*]] = arith.select %[[MAX_CMP]], %[[I0]], %[[I1]] : index
// CHECK-NEXT: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-NEXT: %[[INT_ZERO:.*]] = arith.constant 0 : i32
// CHECK-NEXT: scf.parallel (%{{.*}}) = (%[[MIN]]) to (%[[MAX]])
diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
index 1990803634f5d..d56ac7101202d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir
@@ -138,7 +138,7 @@ func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) -> te
// CHECK: }
// CHECK: %[[VAL_24:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_26:.*]] = select %[[VAL_24]], %[[VAL_25]], %[[VAL_18]] : index
+// CHECK: %[[VAL_26:.*]] = arith.select %[[VAL_24]], %[[VAL_25]], %[[VAL_18]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_26]], %[[VAL_27]] : index, index
// CHECK: }
@@ -345,7 +345,7 @@ func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tensor<32
// CHECK: }
// CHECK: %[[VAL_27:.*]] = arith.cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
-// CHECK: %[[VAL_29:.*]] = select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
+// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
// CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
@@ -441,7 +441,7 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tensor<32
// CHECK: }
// CHECK: %[[VAL_27:.*]] = arith.cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
-// CHECK: %[[VAL_29:.*]] = select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
+// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_27]], %[[VAL_28]], %[[VAL_19]] : index
// CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
@@ -528,7 +528,7 @@ func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = arith.cmpi ult, %[[VAL_26]], %[[VAL_25]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_29:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_30:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = arith.andi %[[VAL_29]], %[[VAL_30]] : i1
@@ -553,10 +553,10 @@ func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tensor<32
// CHECK: }
// CHECK: %[[VAL_39:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_40:.*]] = arith.addi %[[VAL_23]], %[[VAL_4]] : index
-// CHECK: %[[VAL_41:.*]] = select %[[VAL_39]], %[[VAL_40]], %[[VAL_23]] : index
+// CHECK: %[[VAL_41:.*]] = arith.select %[[VAL_39]], %[[VAL_40]], %[[VAL_23]] : index
// CHECK: %[[VAL_42:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_43:.*]] = arith.addi %[[VAL_24]], %[[VAL_4]] : index
-// CHECK: %[[VAL_44:.*]] = select %[[VAL_42]], %[[VAL_43]], %[[VAL_24]] : index
+// CHECK: %[[VAL_44:.*]] = arith.select %[[VAL_42]], %[[VAL_43]], %[[VAL_24]] : index
// CHECK: scf.yield %[[VAL_41]], %[[VAL_44]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_45:.*]] = %[[VAL_46:.*]]#0 to %[[VAL_14]] step %[[VAL_4]] {
@@ -612,7 +612,7 @@ func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = arith.cmpi ult, %[[VAL_26]], %[[VAL_25]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_27]], %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_29:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_30:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_31:.*]] = arith.andi %[[VAL_29]], %[[VAL_30]] : i1
@@ -625,10 +625,10 @@ func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK: }
// CHECK: %[[VAL_35:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_28]] : index
// CHECK: %[[VAL_36:.*]] = arith.addi %[[VAL_23]], %[[VAL_4]] : index
-// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
+// CHECK: %[[VAL_37:.*]] = arith.select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
// CHECK: %[[VAL_38:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_28]] : index
// CHECK: %[[VAL_39:.*]] = arith.addi %[[VAL_24]], %[[VAL_4]] : index
-// CHECK: %[[VAL_40:.*]] = select %[[VAL_38]], %[[VAL_39]], %[[VAL_24]] : index
+// CHECK: %[[VAL_40:.*]] = arith.select %[[VAL_38]], %[[VAL_39]], %[[VAL_24]] : index
// CHECK: scf.yield %[[VAL_37]], %[[VAL_40]] : index, index
// CHECK: }
// CHECK: %[[VAL_41:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<32xf32>
@@ -675,7 +675,7 @@ func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_29:.*]] = select %[[VAL_28]], %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_28]], %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_30:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_29]] : index
// CHECK: %[[VAL_31:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = arith.andi %[[VAL_30]], %[[VAL_31]] : i1
@@ -704,10 +704,10 @@ func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK: }
// CHECK: %[[VAL_44:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_29]] : index
// CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_24]], %[[VAL_5]] : index
-// CHECK: %[[VAL_46:.*]] = select %[[VAL_44]], %[[VAL_45]], %[[VAL_24]] : index
+// CHECK: %[[VAL_46:.*]] = arith.select %[[VAL_44]], %[[VAL_45]], %[[VAL_24]] : index
// CHECK: %[[VAL_47:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_29]] : index
// CHECK: %[[VAL_48:.*]] = arith.addi %[[VAL_25]], %[[VAL_5]] : index
-// CHECK: %[[VAL_49:.*]] = select %[[VAL_47]], %[[VAL_48]], %[[VAL_25]] : index
+// CHECK: %[[VAL_49:.*]] = arith.select %[[VAL_47]], %[[VAL_48]], %[[VAL_25]] : index
// CHECK: scf.yield %[[VAL_46]], %[[VAL_49]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_50:.*]] = %[[VAL_51:.*]]#0 to %[[VAL_15]] step %[[VAL_5]] {
@@ -769,7 +769,7 @@ func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc:
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
// CHECK: %[[VAL_28:.*]] = arith.cmpi ult, %[[VAL_27]], %[[VAL_26]] : index
-// CHECK: %[[VAL_29:.*]] = select %[[VAL_28]], %[[VAL_27]], %[[VAL_26]] : index
+// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_28]], %[[VAL_27]], %[[VAL_26]] : index
// CHECK: %[[VAL_30:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_29]] : index
// CHECK: %[[VAL_31:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_29]] : index
// CHECK: %[[VAL_32:.*]] = arith.andi %[[VAL_30]], %[[VAL_31]] : i1
@@ -797,10 +797,10 @@ func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, %argc:
// CHECK: }
// CHECK: %[[VAL_43:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_29]] : index
// CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_24]], %[[VAL_5]] : index
-// CHECK: %[[VAL_45:.*]] = select %[[VAL_43]], %[[VAL_44]], %[[VAL_24]] : index
+// CHECK: %[[VAL_45:.*]] = arith.select %[[VAL_43]], %[[VAL_44]], %[[VAL_24]] : index
// CHECK: %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_29]] : index
// CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_25]], %[[VAL_5]] : index
-// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_25]] : index
+// CHECK: %[[VAL_48:.*]] = arith.select %[[VAL_46]], %[[VAL_47]], %[[VAL_25]] : index
// CHECK: scf.yield %[[VAL_45]], %[[VAL_48]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_49:.*]] = %[[VAL_50:.*]]#0 to %[[VAL_15]] step %[[VAL_5]] {
@@ -914,7 +914,7 @@ func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32
// CHECK: %[[VAL_28:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_25]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_26]]] : memref<?xindex>
// CHECK: %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_29]], %[[VAL_28]] : index
-// CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
+// CHECK: %[[VAL_31:.*]] = arith.select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_32:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_33:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_34:.*]] = arith.andi %[[VAL_32]], %[[VAL_33]] : i1
@@ -945,10 +945,10 @@ func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tensor<f32
// CHECK: }
// CHECK: %[[VAL_50:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_31]] : index
// CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_25]], %[[VAL_4]] : index
-// CHECK: %[[VAL_52:.*]] = select %[[VAL_50]], %[[VAL_51]], %[[VAL_25]] : index
+// CHECK: %[[VAL_52:.*]] = arith.select %[[VAL_50]], %[[VAL_51]], %[[VAL_25]] : index
// CHECK: %[[VAL_53:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_31]] : index
// CHECK: %[[VAL_54:.*]] = arith.addi %[[VAL_26]], %[[VAL_4]] : index
-// CHECK: %[[VAL_55:.*]] = select %[[VAL_53]], %[[VAL_54]], %[[VAL_26]] : index
+// CHECK: %[[VAL_55:.*]] = arith.select %[[VAL_53]], %[[VAL_54]], %[[VAL_26]] : index
// CHECK: scf.yield %[[VAL_52]], %[[VAL_55]], %[[VAL_56:.*]] : index, index, f32
// CHECK: }
// CHECK: %[[VAL_57:.*]] = scf.for %[[VAL_58:.*]] = %[[VAL_59:.*]]#0 to %[[VAL_15]] step %[[VAL_4]] iter_args(%[[VAL_60:.*]] = %[[VAL_59]]#2) -> (f32) {
@@ -1025,7 +1025,7 @@ func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
// CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_29]]] : memref<?xindex>
// CHECK: %[[VAL_33:.*]] = arith.cmpi ult, %[[VAL_32]], %[[VAL_31]] : index
-// CHECK: %[[VAL_34:.*]] = select %[[VAL_33]], %[[VAL_32]], %[[VAL_31]] : index
+// CHECK: %[[VAL_34:.*]] = arith.select %[[VAL_33]], %[[VAL_32]], %[[VAL_31]] : index
// CHECK: %[[VAL_35:.*]] = arith.cmpi eq, %[[VAL_31]], %[[VAL_34]] : index
// CHECK: %[[VAL_36:.*]] = arith.cmpi eq, %[[VAL_32]], %[[VAL_34]] : index
// CHECK: %[[VAL_37:.*]] = arith.andi %[[VAL_35]], %[[VAL_36]] : i1
@@ -1058,10 +1058,10 @@ func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
// CHECK: }
// CHECK: %[[VAL_55:.*]] = arith.cmpi eq, %[[VAL_31]], %[[VAL_34]] : index
// CHECK: %[[VAL_56:.*]] = arith.addi %[[VAL_28]], %[[VAL_5]] : index
-// CHECK: %[[VAL_57:.*]] = select %[[VAL_55]], %[[VAL_56]], %[[VAL_28]] : index
+// CHECK: %[[VAL_57:.*]] = arith.select %[[VAL_55]], %[[VAL_56]], %[[VAL_28]] : index
// CHECK: %[[VAL_58:.*]] = arith.cmpi eq, %[[VAL_32]], %[[VAL_34]] : index
// CHECK: %[[VAL_59:.*]] = arith.addi %[[VAL_29]], %[[VAL_5]] : index
-// CHECK: %[[VAL_60:.*]] = select %[[VAL_58]], %[[VAL_59]], %[[VAL_29]] : index
+// CHECK: %[[VAL_60:.*]] = arith.select %[[VAL_58]], %[[VAL_59]], %[[VAL_29]] : index
// CHECK: scf.yield %[[VAL_57]], %[[VAL_60]], %[[VAL_61:.*]] : index, index, f32
// CHECK: }
// CHECK: %[[VAL_62:.*]] = scf.for %[[VAL_63:.*]] = %[[VAL_64:.*]]#0 to %[[VAL_18]] step %[[VAL_5]] iter_args(%[[VAL_65:.*]] = %[[VAL_64]]#2) -> (f32) {
@@ -1186,10 +1186,10 @@ func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
// CHECK: }
// CHECK: %[[VAL_60:.*]] = arith.cmpi eq, %[[VAL_33]], %[[VAL_32]] : index
// CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_30]], %[[VAL_7]] : index
-// CHECK: %[[VAL_62:.*]] = select %[[VAL_60]], %[[VAL_61]], %[[VAL_30]] : index
+// CHECK: %[[VAL_62:.*]] = arith.select %[[VAL_60]], %[[VAL_61]], %[[VAL_30]] : index
// CHECK: %[[VAL_63:.*]] = arith.cmpi eq, %[[VAL_34]], %[[VAL_32]] : index
// CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_31]], %[[VAL_7]] : index
-// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_31]] : index
+// CHECK: %[[VAL_65:.*]] = arith.select %[[VAL_63]], %[[VAL_64]], %[[VAL_31]] : index
// CHECK: %[[VAL_66:.*]] = arith.addi %[[VAL_32]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_62]], %[[VAL_65]], %[[VAL_66]] : index, index, index
// CHECK: }
@@ -1218,7 +1218,7 @@ func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
// CHECK: }
// CHECK: %[[VAL_84:.*]] = arith.cmpi eq, %[[VAL_74]], %[[VAL_73]] : index
// CHECK: %[[VAL_85:.*]] = arith.addi %[[VAL_72]], %[[VAL_7]] : index
-// CHECK: %[[VAL_86:.*]] = select %[[VAL_84]], %[[VAL_85]], %[[VAL_72]] : index
+// CHECK: %[[VAL_86:.*]] = arith.select %[[VAL_84]], %[[VAL_85]], %[[VAL_72]] : index
// CHECK: %[[VAL_87:.*]] = arith.addi %[[VAL_73]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_86]], %[[VAL_87]] : index, index
// CHECK: }
@@ -1247,7 +1247,7 @@ func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
// CHECK: }
// CHECK: %[[VAL_106:.*]] = arith.cmpi eq, %[[VAL_96]], %[[VAL_95]] : index
// CHECK: %[[VAL_107:.*]] = arith.addi %[[VAL_94]], %[[VAL_7]] : index
-// CHECK: %[[VAL_108:.*]] = select %[[VAL_106]], %[[VAL_107]], %[[VAL_94]] : index
+// CHECK: %[[VAL_108:.*]] = arith.select %[[VAL_106]], %[[VAL_107]], %[[VAL_94]] : index
// CHECK: %[[VAL_109:.*]] = arith.addi %[[VAL_95]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_108]], %[[VAL_109]] : index, index
// CHECK: }
@@ -1326,10 +1326,10 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: %[[VAL_38:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_34]]] : memref<?xindex>
// CHECK: %[[VAL_39:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_35]]] : memref<?xindex>
// CHECK: %[[VAL_40:.*]] = arith.cmpi ult, %[[VAL_39]], %[[VAL_38]] : index
-// CHECK: %[[VAL_41:.*]] = select %[[VAL_40]], %[[VAL_39]], %[[VAL_38]] : index
+// CHECK: %[[VAL_41:.*]] = arith.select %[[VAL_40]], %[[VAL_39]], %[[VAL_38]] : index
// CHECK: %[[VAL_42:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_36]]] : memref<?xindex>
// CHECK: %[[VAL_43:.*]] = arith.cmpi ult, %[[VAL_42]], %[[VAL_41]] : index
-// CHECK: %[[VAL_44:.*]] = select %[[VAL_43]], %[[VAL_42]], %[[VAL_41]] : index
+// CHECK: %[[VAL_44:.*]] = arith.select %[[VAL_43]], %[[VAL_42]], %[[VAL_41]] : index
// CHECK: %[[VAL_45:.*]] = arith.cmpi eq, %[[VAL_38]], %[[VAL_44]] : index
// CHECK: %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_39]], %[[VAL_44]] : index
// CHECK: %[[VAL_47:.*]] = arith.andi %[[VAL_45]], %[[VAL_46]] : i1
@@ -1408,13 +1408,13 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: }
// CHECK: %[[VAL_99:.*]] = arith.cmpi eq, %[[VAL_38]], %[[VAL_44]] : index
// CHECK: %[[VAL_100:.*]] = arith.addi %[[VAL_34]], %[[VAL_5]] : index
-// CHECK: %[[VAL_101:.*]] = select %[[VAL_99]], %[[VAL_100]], %[[VAL_34]] : index
+// CHECK: %[[VAL_101:.*]] = arith.select %[[VAL_99]], %[[VAL_100]], %[[VAL_34]] : index
// CHECK: %[[VAL_102:.*]] = arith.cmpi eq, %[[VAL_39]], %[[VAL_44]] : index
// CHECK: %[[VAL_103:.*]] = arith.addi %[[VAL_35]], %[[VAL_5]] : index
-// CHECK: %[[VAL_104:.*]] = select %[[VAL_102]], %[[VAL_103]], %[[VAL_35]] : index
+// CHECK: %[[VAL_104:.*]] = arith.select %[[VAL_102]], %[[VAL_103]], %[[VAL_35]] : index
// CHECK: %[[VAL_105:.*]] = arith.cmpi eq, %[[VAL_42]], %[[VAL_44]] : index
// CHECK: %[[VAL_106:.*]] = arith.addi %[[VAL_36]], %[[VAL_5]] : index
-// CHECK: %[[VAL_107:.*]] = select %[[VAL_105]], %[[VAL_106]], %[[VAL_36]] : index
+// CHECK: %[[VAL_107:.*]] = arith.select %[[VAL_105]], %[[VAL_106]], %[[VAL_36]] : index
// CHECK: scf.yield %[[VAL_101]], %[[VAL_104]], %[[VAL_107]], %[[VAL_108:.*]] : index, index, index, f64
// CHECK: }
// CHECK: %[[VAL_109:.*]]:3 = scf.while (%[[VAL_110:.*]] = %[[VAL_111:.*]]#1, %[[VAL_112:.*]] = %[[VAL_111]]#2, %[[VAL_113:.*]] = %[[VAL_111]]#3) : (index, index, f64) -> (index, index, f64) {
@@ -1427,7 +1427,7 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: %[[VAL_120:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_117]]] : memref<?xindex>
// CHECK: %[[VAL_121:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_118]]] : memref<?xindex>
// CHECK: %[[VAL_122:.*]] = arith.cmpi ult, %[[VAL_121]], %[[VAL_120]] : index
-// CHECK: %[[VAL_123:.*]] = select %[[VAL_122]], %[[VAL_121]], %[[VAL_120]] : index
+// CHECK: %[[VAL_123:.*]] = arith.select %[[VAL_122]], %[[VAL_121]], %[[VAL_120]] : index
// CHECK: %[[VAL_124:.*]] = arith.cmpi eq, %[[VAL_120]], %[[VAL_123]] : index
// CHECK: %[[VAL_125:.*]] = arith.cmpi eq, %[[VAL_121]], %[[VAL_123]] : index
// CHECK: %[[VAL_126:.*]] = arith.andi %[[VAL_124]], %[[VAL_125]] : i1
@@ -1458,10 +1458,10 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: }
// CHECK: %[[VAL_142:.*]] = arith.cmpi eq, %[[VAL_120]], %[[VAL_123]] : index
// CHECK: %[[VAL_143:.*]] = arith.addi %[[VAL_117]], %[[VAL_5]] : index
-// CHECK: %[[VAL_144:.*]] = select %[[VAL_142]], %[[VAL_143]], %[[VAL_117]] : index
+// CHECK: %[[VAL_144:.*]] = arith.select %[[VAL_142]], %[[VAL_143]], %[[VAL_117]] : index
// CHECK: %[[VAL_145:.*]] = arith.cmpi eq, %[[VAL_121]], %[[VAL_123]] : index
// CHECK: %[[VAL_146:.*]] = arith.addi %[[VAL_118]], %[[VAL_5]] : index
-// CHECK: %[[VAL_147:.*]] = select %[[VAL_145]], %[[VAL_146]], %[[VAL_118]] : index
+// CHECK: %[[VAL_147:.*]] = arith.select %[[VAL_145]], %[[VAL_146]], %[[VAL_118]] : index
// CHECK: scf.yield %[[VAL_144]], %[[VAL_147]], %[[VAL_148:.*]] : index, index, f64
// CHECK: }
// CHECK: %[[VAL_149:.*]]:3 = scf.while (%[[VAL_150:.*]] = %[[VAL_151:.*]]#0, %[[VAL_152:.*]] = %[[VAL_153:.*]]#1, %[[VAL_154:.*]] = %[[VAL_153]]#2) : (index, index, f64) -> (index, index, f64) {
@@ -1474,7 +1474,7 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: %[[VAL_161:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_158]]] : memref<?xindex>
// CHECK: %[[VAL_162:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_159]]] : memref<?xindex>
// CHECK: %[[VAL_163:.*]] = arith.cmpi ult, %[[VAL_162]], %[[VAL_161]] : index
-// CHECK: %[[VAL_164:.*]] = select %[[VAL_163]], %[[VAL_162]], %[[VAL_161]] : index
+// CHECK: %[[VAL_164:.*]] = arith.select %[[VAL_163]], %[[VAL_162]], %[[VAL_161]] : index
// CHECK: %[[VAL_165:.*]] = arith.cmpi eq, %[[VAL_161]], %[[VAL_164]] : index
// CHECK: %[[VAL_166:.*]] = arith.cmpi eq, %[[VAL_162]], %[[VAL_164]] : index
// CHECK: %[[VAL_167:.*]] = arith.andi %[[VAL_165]], %[[VAL_166]] : i1
@@ -1505,10 +1505,10 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: }
// CHECK: %[[VAL_183:.*]] = arith.cmpi eq, %[[VAL_161]], %[[VAL_164]] : index
// CHECK: %[[VAL_184:.*]] = arith.addi %[[VAL_158]], %[[VAL_5]] : index
-// CHECK: %[[VAL_185:.*]] = select %[[VAL_183]], %[[VAL_184]], %[[VAL_158]] : index
+// CHECK: %[[VAL_185:.*]] = arith.select %[[VAL_183]], %[[VAL_184]], %[[VAL_158]] : index
// CHECK: %[[VAL_186:.*]] = arith.cmpi eq, %[[VAL_162]], %[[VAL_164]] : index
// CHECK: %[[VAL_187:.*]] = arith.addi %[[VAL_159]], %[[VAL_5]] : index
-// CHECK: %[[VAL_188:.*]] = select %[[VAL_186]], %[[VAL_187]], %[[VAL_159]] : index
+// CHECK: %[[VAL_188:.*]] = arith.select %[[VAL_186]], %[[VAL_187]], %[[VAL_159]] : index
// CHECK: scf.yield %[[VAL_185]], %[[VAL_188]], %[[VAL_189:.*]] : index, index, f64
// CHECK: }
// CHECK: %[[VAL_190:.*]] = scf.for %[[VAL_191:.*]] = %[[VAL_192:.*]]#1 to %[[VAL_23]] step %[[VAL_5]] iter_args(%[[VAL_193:.*]] = %[[VAL_192]]#2) -> (f64) {
@@ -1526,7 +1526,7 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: %[[VAL_209:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_206]]] : memref<?xindex>
// CHECK: %[[VAL_210:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_207]]] : memref<?xindex>
// CHECK: %[[VAL_211:.*]] = arith.cmpi ult, %[[VAL_210]], %[[VAL_209]] : index
-// CHECK: %[[VAL_212:.*]] = select %[[VAL_211]], %[[VAL_210]], %[[VAL_209]] : index
+// CHECK: %[[VAL_212:.*]] = arith.select %[[VAL_211]], %[[VAL_210]], %[[VAL_209]] : index
// CHECK: %[[VAL_213:.*]] = arith.cmpi eq, %[[VAL_209]], %[[VAL_212]] : index
// CHECK: %[[VAL_214:.*]] = arith.cmpi eq, %[[VAL_210]], %[[VAL_212]] : index
// CHECK: %[[VAL_215:.*]] = arith.andi %[[VAL_213]], %[[VAL_214]] : i1
@@ -1557,10 +1557,10 @@ func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK: }
// CHECK: %[[VAL_231:.*]] = arith.cmpi eq, %[[VAL_209]], %[[VAL_212]] : index
// CHECK: %[[VAL_232:.*]] = arith.addi %[[VAL_206]], %[[VAL_5]] : index
-// CHECK: %[[VAL_233:.*]] = select %[[VAL_231]], %[[VAL_232]], %[[VAL_206]] : index
+// CHECK: %[[VAL_233:.*]] = arith.select %[[VAL_231]], %[[VAL_232]], %[[VAL_206]] : index
// CHECK: %[[VAL_234:.*]] = arith.cmpi eq, %[[VAL_210]], %[[VAL_212]] : index
// CHECK: %[[VAL_235:.*]] = arith.addi %[[VAL_207]], %[[VAL_5]] : index
-// CHECK: %[[VAL_236:.*]] = select %[[VAL_234]], %[[VAL_235]], %[[VAL_207]] : index
+// CHECK: %[[VAL_236:.*]] = arith.select %[[VAL_234]], %[[VAL_235]], %[[VAL_207]] : index
// CHECK: scf.yield %[[VAL_233]], %[[VAL_236]], %[[VAL_237:.*]] : index, index, f64
// CHECK: }
// CHECK: %[[VAL_238:.*]] = scf.for %[[VAL_239:.*]] = %[[VAL_240:.*]]#1 to %[[VAL_21]] step %[[VAL_5]] iter_args(%[[VAL_241:.*]] = %[[VAL_240]]#2) -> (f64) {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
index 1a0db13928e96..d17601535188b 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir
@@ -131,7 +131,7 @@ func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_30:.*]] = arith.cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_31:.*]] = arith.addi %[[VAL_22]], %[[VAL_7]] : index
-// CHECK: %[[VAL_32:.*]] = select %[[VAL_30]], %[[VAL_31]], %[[VAL_22]] : index
+// CHECK: %[[VAL_32:.*]] = arith.select %[[VAL_30]], %[[VAL_31]], %[[VAL_22]] : index
// CHECK: %[[VAL_33:.*]] = arith.addi %[[VAL_23]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_32]], %[[VAL_33]] : index, index
// CHECK: }
@@ -239,7 +239,7 @@ func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_32:.*]] = arith.cmpi eq, %[[VAL_22]], %[[VAL_21]] : index
// CHECK: %[[VAL_33:.*]] = arith.addi %[[VAL_20]], %[[VAL_7]] : index
-// CHECK: %[[VAL_34:.*]] = select %[[VAL_32]], %[[VAL_33]], %[[VAL_20]] : index
+// CHECK: %[[VAL_34:.*]] = arith.select %[[VAL_32]], %[[VAL_33]], %[[VAL_20]] : index
// CHECK: %[[VAL_35:.*]] = arith.addi %[[VAL_21]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_34]], %[[VAL_35]] : index, index
// CHECK: }
@@ -356,7 +356,7 @@ func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_33]], %[[VAL_7]] : index
-// CHECK: %[[VAL_43:.*]] = select %[[VAL_41]], %[[VAL_42]], %[[VAL_33]] : index
+// CHECK: %[[VAL_43:.*]] = arith.select %[[VAL_41]], %[[VAL_42]], %[[VAL_33]] : index
// CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_43]], %[[VAL_44]] : index, index
// CHECK: }
@@ -375,7 +375,7 @@ func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_50:.*]] = arith.cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_22]], %[[VAL_7]] : index
-// CHECK: %[[VAL_52:.*]] = select %[[VAL_50]], %[[VAL_51]], %[[VAL_22]] : index
+// CHECK: %[[VAL_52:.*]] = arith.select %[[VAL_50]], %[[VAL_51]], %[[VAL_22]] : index
// CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_23]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_52]], %[[VAL_53]] : index, index
// CHECK: }
@@ -476,7 +476,7 @@ func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = arith.cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
-// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
+// CHECK: %[[VAL_32:.*]] = arith.select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = arith.cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = arith.andi %[[VAL_33]], %[[VAL_34]] : i1
@@ -497,7 +497,7 @@ func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: %[[VAL_50:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
// CHECK: %[[VAL_51:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = arith.cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
-// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
+// CHECK: %[[VAL_53:.*]] = arith.select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = arith.cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = arith.cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = arith.andi %[[VAL_54]], %[[VAL_55]] : i1
@@ -522,10 +522,10 @@ func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_64:.*]] = arith.cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_65:.*]] = arith.addi %[[VAL_48]], %[[VAL_4]] : index
-// CHECK: %[[VAL_66:.*]] = select %[[VAL_64]], %[[VAL_65]], %[[VAL_48]] : index
+// CHECK: %[[VAL_66:.*]] = arith.select %[[VAL_64]], %[[VAL_65]], %[[VAL_48]] : index
// CHECK: %[[VAL_67:.*]] = arith.cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_68:.*]] = arith.addi %[[VAL_49]], %[[VAL_4]] : index
-// CHECK: %[[VAL_69:.*]] = select %[[VAL_67]], %[[VAL_68]], %[[VAL_49]] : index
+// CHECK: %[[VAL_69:.*]] = arith.select %[[VAL_67]], %[[VAL_68]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_66]], %[[VAL_69]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_70:.*]] = %[[VAL_71:.*]]#0 to %[[VAL_38]] step %[[VAL_4]] {
@@ -566,10 +566,10 @@ func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %argx: te
// CHECK: }
// CHECK: %[[VAL_92:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_93:.*]] = arith.addi %[[VAL_27]], %[[VAL_4]] : index
-// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_27]] : index
+// CHECK: %[[VAL_94:.*]] = arith.select %[[VAL_92]], %[[VAL_93]], %[[VAL_27]] : index
// CHECK: %[[VAL_95:.*]] = arith.cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_96:.*]] = arith.addi %[[VAL_28]], %[[VAL_4]] : index
-// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_28]] : index
+// CHECK: %[[VAL_97:.*]] = arith.select %[[VAL_95]], %[[VAL_96]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_98:.*]] = %[[VAL_99:.*]]#0 to %[[VAL_18]] step %[[VAL_4]] {
@@ -641,7 +641,7 @@ func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
// CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = arith.cmpi ult, %[[VAL_30]], %[[VAL_29]] : index
-// CHECK: %[[VAL_32:.*]] = select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
+// CHECK: %[[VAL_32:.*]] = arith.select %[[VAL_31]], %[[VAL_30]], %[[VAL_29]] : index
// CHECK: %[[VAL_33:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_34:.*]] = arith.cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_35:.*]] = arith.andi %[[VAL_33]], %[[VAL_34]] : i1
@@ -662,7 +662,7 @@ func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
// CHECK: %[[VAL_50:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_48]]] : memref<?xindex>
// CHECK: %[[VAL_51:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_49]]] : memref<?xindex>
// CHECK: %[[VAL_52:.*]] = arith.cmpi ult, %[[VAL_51]], %[[VAL_50]] : index
-// CHECK: %[[VAL_53:.*]] = select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
+// CHECK: %[[VAL_53:.*]] = arith.select %[[VAL_52]], %[[VAL_51]], %[[VAL_50]] : index
// CHECK: %[[VAL_54:.*]] = arith.cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_55:.*]] = arith.cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_56:.*]] = arith.andi %[[VAL_54]], %[[VAL_55]] : i1
@@ -675,20 +675,20 @@ func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_60:.*]] = arith.cmpi eq, %[[VAL_50]], %[[VAL_53]] : index
// CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_48]], %[[VAL_4]] : index
-// CHECK: %[[VAL_62:.*]] = select %[[VAL_60]], %[[VAL_61]], %[[VAL_48]] : index
+// CHECK: %[[VAL_62:.*]] = arith.select %[[VAL_60]], %[[VAL_61]], %[[VAL_48]] : index
// CHECK: %[[VAL_63:.*]] = arith.cmpi eq, %[[VAL_51]], %[[VAL_53]] : index
// CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_49]], %[[VAL_4]] : index
-// CHECK: %[[VAL_65:.*]] = select %[[VAL_63]], %[[VAL_64]], %[[VAL_49]] : index
+// CHECK: %[[VAL_65:.*]] = arith.select %[[VAL_63]], %[[VAL_64]], %[[VAL_49]] : index
// CHECK: scf.yield %[[VAL_62]], %[[VAL_65]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_66:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_32]] : index
// CHECK: %[[VAL_67:.*]] = arith.addi %[[VAL_27]], %[[VAL_4]] : index
-// CHECK: %[[VAL_68:.*]] = select %[[VAL_66]], %[[VAL_67]], %[[VAL_27]] : index
+// CHECK: %[[VAL_68:.*]] = arith.select %[[VAL_66]], %[[VAL_67]], %[[VAL_27]] : index
// CHECK: %[[VAL_69:.*]] = arith.cmpi eq, %[[VAL_30]], %[[VAL_32]] : index
// CHECK: %[[VAL_70:.*]] = arith.addi %[[VAL_28]], %[[VAL_4]] : index
-// CHECK: %[[VAL_71:.*]] = select %[[VAL_69]], %[[VAL_70]], %[[VAL_28]] : index
+// CHECK: %[[VAL_71:.*]] = arith.select %[[VAL_69]], %[[VAL_70]], %[[VAL_28]] : index
// CHECK: scf.yield %[[VAL_68]], %[[VAL_71]] : index, index
// CHECK: }
// CHECK: %[[VAL_72:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<32x16xf32>
@@ -759,7 +759,7 @@ func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_43:.*]] = arith.cmpi eq, %[[VAL_35]], %[[VAL_34]] : index
// CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_33]], %[[VAL_7]] : index
-// CHECK: %[[VAL_45:.*]] = select %[[VAL_43]], %[[VAL_44]], %[[VAL_33]] : index
+// CHECK: %[[VAL_45:.*]] = arith.select %[[VAL_43]], %[[VAL_44]], %[[VAL_33]] : index
// CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_45]], %[[VAL_46]] : index, index
// CHECK: }
@@ -784,7 +784,7 @@ func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_58:.*]] = arith.cmpi eq, %[[VAL_24]], %[[VAL_23]] : index
// CHECK: %[[VAL_59:.*]] = arith.addi %[[VAL_22]], %[[VAL_7]] : index
-// CHECK: %[[VAL_60:.*]] = select %[[VAL_58]], %[[VAL_59]], %[[VAL_22]] : index
+// CHECK: %[[VAL_60:.*]] = arith.select %[[VAL_58]], %[[VAL_59]], %[[VAL_22]] : index
// CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_23]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_60]], %[[VAL_61]] : index, index
// CHECK: }
@@ -1155,10 +1155,10 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK: %[[VAL_61:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_57]]] : memref<?xindex>
// CHECK: %[[VAL_62:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_58]]] : memref<?xindex>
// CHECK: %[[VAL_63:.*]] = arith.cmpi ult, %[[VAL_62]], %[[VAL_61]] : index
-// CHECK: %[[VAL_64:.*]] = select %[[VAL_63]], %[[VAL_62]], %[[VAL_61]] : index
+// CHECK: %[[VAL_64:.*]] = arith.select %[[VAL_63]], %[[VAL_62]], %[[VAL_61]] : index
// CHECK: %[[VAL_65:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_59]]] : memref<?xindex>
// CHECK: %[[VAL_66:.*]] = arith.cmpi ult, %[[VAL_65]], %[[VAL_64]] : index
-// CHECK: %[[VAL_67:.*]] = select %[[VAL_66]], %[[VAL_65]], %[[VAL_64]] : index
+// CHECK: %[[VAL_67:.*]] = arith.select %[[VAL_66]], %[[VAL_65]], %[[VAL_64]] : index
// CHECK: %[[VAL_68:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_67]] : index
// CHECK: %[[VAL_69:.*]] = arith.cmpi eq, %[[VAL_62]], %[[VAL_67]] : index
// CHECK: %[[VAL_70:.*]] = arith.andi %[[VAL_68]], %[[VAL_69]] : i1
@@ -1201,13 +1201,13 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_98:.*]] = arith.cmpi eq, %[[VAL_61]], %[[VAL_67]] : index
// CHECK: %[[VAL_99:.*]] = arith.addi %[[VAL_57]], %[[VAL_7]] : index
-// CHECK: %[[VAL_100:.*]] = select %[[VAL_98]], %[[VAL_99]], %[[VAL_57]] : index
+// CHECK: %[[VAL_100:.*]] = arith.select %[[VAL_98]], %[[VAL_99]], %[[VAL_57]] : index
// CHECK: %[[VAL_101:.*]] = arith.cmpi eq, %[[VAL_62]], %[[VAL_67]] : index
// CHECK: %[[VAL_102:.*]] = arith.addi %[[VAL_58]], %[[VAL_7]] : index
-// CHECK: %[[VAL_103:.*]] = select %[[VAL_101]], %[[VAL_102]], %[[VAL_58]] : index
+// CHECK: %[[VAL_103:.*]] = arith.select %[[VAL_101]], %[[VAL_102]], %[[VAL_58]] : index
// CHECK: %[[VAL_104:.*]] = arith.cmpi eq, %[[VAL_65]], %[[VAL_67]] : index
// CHECK: %[[VAL_105:.*]] = arith.addi %[[VAL_59]], %[[VAL_7]] : index
-// CHECK: %[[VAL_106:.*]] = select %[[VAL_104]], %[[VAL_105]], %[[VAL_59]] : index
+// CHECK: %[[VAL_106:.*]] = arith.select %[[VAL_104]], %[[VAL_105]], %[[VAL_59]] : index
// CHECK: scf.yield %[[VAL_100]], %[[VAL_103]], %[[VAL_106]], %[[VAL_107:.*]] : index, index, index, f32
// CHECK: }
// CHECK: %[[VAL_108:.*]]:3 = scf.while (%[[VAL_109:.*]] = %[[VAL_110:.*]]#0, %[[VAL_111:.*]] = %[[VAL_110]]#1, %[[VAL_112:.*]] = %[[VAL_110]]#3) : (index, index, f32) -> (index, index, f32) {
@@ -1220,7 +1220,7 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK: %[[VAL_119:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_116]]] : memref<?xindex>
// CHECK: %[[VAL_120:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_117]]] : memref<?xindex>
// CHECK: %[[VAL_121:.*]] = arith.cmpi ult, %[[VAL_120]], %[[VAL_119]] : index
-// CHECK: %[[VAL_122:.*]] = select %[[VAL_121]], %[[VAL_120]], %[[VAL_119]] : index
+// CHECK: %[[VAL_122:.*]] = arith.select %[[VAL_121]], %[[VAL_120]], %[[VAL_119]] : index
// CHECK: %[[VAL_123:.*]] = arith.cmpi eq, %[[VAL_119]], %[[VAL_122]] : index
// CHECK: %[[VAL_124:.*]] = arith.cmpi eq, %[[VAL_120]], %[[VAL_122]] : index
// CHECK: %[[VAL_125:.*]] = arith.andi %[[VAL_123]], %[[VAL_124]] : i1
@@ -1237,10 +1237,10 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_133:.*]] = arith.cmpi eq, %[[VAL_119]], %[[VAL_122]] : index
// CHECK: %[[VAL_134:.*]] = arith.addi %[[VAL_116]], %[[VAL_7]] : index
-// CHECK: %[[VAL_135:.*]] = select %[[VAL_133]], %[[VAL_134]], %[[VAL_116]] : index
+// CHECK: %[[VAL_135:.*]] = arith.select %[[VAL_133]], %[[VAL_134]], %[[VAL_116]] : index
// CHECK: %[[VAL_136:.*]] = arith.cmpi eq, %[[VAL_120]], %[[VAL_122]] : index
// CHECK: %[[VAL_137:.*]] = arith.addi %[[VAL_117]], %[[VAL_7]] : index
-// CHECK: %[[VAL_138:.*]] = select %[[VAL_136]], %[[VAL_137]], %[[VAL_117]] : index
+// CHECK: %[[VAL_138:.*]] = arith.select %[[VAL_136]], %[[VAL_137]], %[[VAL_117]] : index
// CHECK: scf.yield %[[VAL_135]], %[[VAL_138]], %[[VAL_139:.*]] : index, index, f32
// CHECK: }
// CHECK: %[[VAL_140:.*]] = scf.for %[[VAL_141:.*]] = %[[VAL_142:.*]]#2 to %[[VAL_46]] step %[[VAL_7]] iter_args(%[[VAL_143:.*]] = %[[VAL_144:.*]]#2) -> (f32) {
@@ -1266,7 +1266,7 @@ func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK: }
// CHECK: %[[VAL_158:.*]] = arith.cmpi eq, %[[VAL_34]], %[[VAL_33]] : index
// CHECK: %[[VAL_159:.*]] = arith.addi %[[VAL_32]], %[[VAL_7]] : index
-// CHECK: %[[VAL_160:.*]] = select %[[VAL_158]], %[[VAL_159]], %[[VAL_32]] : index
+// CHECK: %[[VAL_160:.*]] = arith.select %[[VAL_158]], %[[VAL_159]], %[[VAL_32]] : index
// CHECK: %[[VAL_161:.*]] = arith.addi %[[VAL_33]], %[[VAL_7]] : index
// CHECK: scf.yield %[[VAL_160]], %[[VAL_161]] : index, index
// CHECK: }
diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
index d1acf1f00c916..aea77ac313eb5 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir
@@ -152,7 +152,7 @@ func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_35:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_28]] : index
// CHECK: %[[VAL_36:.*]] = arith.addi %[[VAL_27]], %[[VAL_9]] : index
-// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_27]] : index
+// CHECK: %[[VAL_37:.*]] = arith.select %[[VAL_35]], %[[VAL_36]], %[[VAL_27]] : index
// CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_28]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_37]], %[[VAL_38]] : index, index
// CHECK: }
@@ -270,7 +270,7 @@ func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_35:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: %[[VAL_36:.*]] = arith.addi %[[VAL_23]], %[[VAL_8]] : index
-// CHECK: %[[VAL_37:.*]] = select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
+// CHECK: %[[VAL_37:.*]] = arith.select %[[VAL_35]], %[[VAL_36]], %[[VAL_23]] : index
// CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_37]], %[[VAL_38]] : index, index
// CHECK: }
@@ -396,7 +396,7 @@ func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_45:.*]] = arith.cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
// CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_37]], %[[VAL_9]] : index
-// CHECK: %[[VAL_47:.*]] = select %[[VAL_45]], %[[VAL_46]], %[[VAL_37]] : index
+// CHECK: %[[VAL_47:.*]] = arith.select %[[VAL_45]], %[[VAL_46]], %[[VAL_37]] : index
// CHECK: %[[VAL_48:.*]] = arith.addi %[[VAL_38]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_47]], %[[VAL_48]] : index, index
// CHECK: }
@@ -415,7 +415,7 @@ func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_54:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: %[[VAL_55:.*]] = arith.addi %[[VAL_26]], %[[VAL_9]] : index
-// CHECK: %[[VAL_56:.*]] = select %[[VAL_54]], %[[VAL_55]], %[[VAL_26]] : index
+// CHECK: %[[VAL_56:.*]] = arith.select %[[VAL_54]], %[[VAL_55]], %[[VAL_26]] : index
// CHECK: %[[VAL_57:.*]] = arith.addi %[[VAL_27]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_56]], %[[VAL_57]] : index, index
// CHECK: }
@@ -541,7 +541,7 @@ func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_37:.*]] = arith.cmpi eq, %[[VAL_23]], %[[VAL_22]] : index
// CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_21]], %[[VAL_8]] : index
-// CHECK: %[[VAL_39:.*]] = select %[[VAL_37]], %[[VAL_38]], %[[VAL_21]] : index
+// CHECK: %[[VAL_39:.*]] = arith.select %[[VAL_37]], %[[VAL_38]], %[[VAL_21]] : index
// CHECK: %[[VAL_40:.*]] = arith.addi %[[VAL_22]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_39]], %[[VAL_40]] : index, index
// CHECK: }
@@ -670,7 +670,7 @@ func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_40]], %[[VAL_39]] : index
// CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_38]], %[[VAL_9]] : index
-// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_38]] : index
+// CHECK: %[[VAL_48:.*]] = arith.select %[[VAL_46]], %[[VAL_47]], %[[VAL_38]] : index
// CHECK: %[[VAL_49:.*]] = arith.addi %[[VAL_39]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_48]], %[[VAL_49]] : index, index
// CHECK: }
@@ -692,7 +692,7 @@ func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_56:.*]] = arith.cmpi eq, %[[VAL_26]], %[[VAL_25]] : index
// CHECK: %[[VAL_57:.*]] = arith.addi %[[VAL_24]], %[[VAL_9]] : index
-// CHECK: %[[VAL_58:.*]] = select %[[VAL_56]], %[[VAL_57]], %[[VAL_24]] : index
+// CHECK: %[[VAL_58:.*]] = arith.select %[[VAL_56]], %[[VAL_57]], %[[VAL_24]] : index
// CHECK: %[[VAL_59:.*]] = arith.addi %[[VAL_25]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_58]], %[[VAL_59]] : index, index
// CHECK: }
@@ -827,7 +827,7 @@ func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_46:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_35]] : index
// CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_34]], %[[VAL_8]] : index
-// CHECK: %[[VAL_48:.*]] = select %[[VAL_46]], %[[VAL_47]], %[[VAL_34]] : index
+// CHECK: %[[VAL_48:.*]] = arith.select %[[VAL_46]], %[[VAL_47]], %[[VAL_34]] : index
// CHECK: %[[VAL_49:.*]] = arith.addi %[[VAL_35]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_48]], %[[VAL_49]] : index, index
// CHECK: }
@@ -850,7 +850,7 @@ func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_57:.*]] = arith.cmpi eq, %[[VAL_25]], %[[VAL_24]] : index
// CHECK: %[[VAL_58:.*]] = arith.addi %[[VAL_23]], %[[VAL_8]] : index
-// CHECK: %[[VAL_59:.*]] = select %[[VAL_57]], %[[VAL_58]], %[[VAL_23]] : index
+// CHECK: %[[VAL_59:.*]] = arith.select %[[VAL_57]], %[[VAL_58]], %[[VAL_23]] : index
// CHECK: %[[VAL_60:.*]] = arith.addi %[[VAL_24]], %[[VAL_8]] : index
// CHECK: scf.yield %[[VAL_59]], %[[VAL_60]] : index, index
// CHECK: }
@@ -992,7 +992,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_56:.*]] = arith.cmpi eq, %[[VAL_50]], %[[VAL_49]] : index
// CHECK: %[[VAL_57:.*]] = arith.addi %[[VAL_48]], %[[VAL_9]] : index
-// CHECK: %[[VAL_58:.*]] = select %[[VAL_56]], %[[VAL_57]], %[[VAL_48]] : index
+// CHECK: %[[VAL_58:.*]] = arith.select %[[VAL_56]], %[[VAL_57]], %[[VAL_48]] : index
// CHECK: %[[VAL_59:.*]] = arith.addi %[[VAL_49]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_58]], %[[VAL_59]] : index, index
// CHECK: }
@@ -1011,7 +1011,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_65:.*]] = arith.cmpi eq, %[[VAL_39]], %[[VAL_38]] : index
// CHECK: %[[VAL_66:.*]] = arith.addi %[[VAL_37]], %[[VAL_9]] : index
-// CHECK: %[[VAL_67:.*]] = select %[[VAL_65]], %[[VAL_66]], %[[VAL_37]] : index
+// CHECK: %[[VAL_67:.*]] = arith.select %[[VAL_65]], %[[VAL_66]], %[[VAL_37]] : index
// CHECK: %[[VAL_68:.*]] = arith.addi %[[VAL_38]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_67]], %[[VAL_68]] : index, index
// CHECK: }
@@ -1034,7 +1034,7 @@ func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>, %ar
// CHECK: }
// CHECK: %[[VAL_76:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_27]] : index
// CHECK: %[[VAL_77:.*]] = arith.addi %[[VAL_26]], %[[VAL_9]] : index
-// CHECK: %[[VAL_78:.*]] = select %[[VAL_76]], %[[VAL_77]], %[[VAL_26]] : index
+// CHECK: %[[VAL_78:.*]] = arith.select %[[VAL_76]], %[[VAL_77]], %[[VAL_26]] : index
// CHECK: %[[VAL_79:.*]] = arith.addi %[[VAL_27]], %[[VAL_9]] : index
// CHECK: scf.yield %[[VAL_78]], %[[VAL_79]] : index, index
// CHECK: }
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
index cf81e45f2432d..4b0d389bb1870 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -194,7 +194,7 @@ func @neg(%arga: tensor<32xf64, #SV>,
// CHECK: }
// CHECK: %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK: }
@@ -255,7 +255,7 @@ func @add(%arga: tensor<32xf64, #SV>,
// CHECK: }
// CHECK: %[[VAL_27:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_29:.*]] = select %[[VAL_27]], %[[VAL_28]], %[[VAL_18]] : index
+// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_27]], %[[VAL_28]], %[[VAL_18]] : index
// CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
index c60fcd0ca587e..a865dc2bec116 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir
@@ -58,7 +58,7 @@
// CHECK: }
// CHECK: %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK: }
@@ -120,7 +120,7 @@ func @add(%arga: tensor<32xi64, #SV>,
// CHECK: }
// CHECK: %[[VAL_28:.*]] = arith.cmpi eq, %[[VAL_21]], %[[VAL_20]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
-// CHECK: %[[VAL_30:.*]] = select %[[VAL_28]], %[[VAL_29]], %[[VAL_19]] : index
+// CHECK: %[[VAL_30:.*]] = arith.select %[[VAL_28]], %[[VAL_29]], %[[VAL_19]] : index
// CHECK: %[[VAL_31:.*]] = arith.addi %[[VAL_20]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_30]], %[[VAL_31]] : index, index
// CHECK: }
@@ -321,7 +321,7 @@ func @and(%arga: tensor<32xi64, #SV>,
// CHECK: }
// CHECK: %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK: }
@@ -381,7 +381,7 @@ func @or(%arga: tensor<32xi64, #SV>,
// CHECK: }
// CHECK: %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
-// CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
+// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK: }
diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
index 610c59847bc92..7d8461ce2e167 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir
@@ -97,7 +97,7 @@ func @matmul1(%a: tensor<10x20xf32, #DCSR>,
// CHECK: %[[VAL_43:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_40]]] : memref<?xindex>
// CHECK: %[[VAL_44:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_41]]] : memref<?xindex>
// CHECK: %[[VAL_45:.*]] = arith.cmpi ult, %[[VAL_44]], %[[VAL_43]] : index
-// CHECK: %[[VAL_46:.*]] = select %[[VAL_45]], %[[VAL_44]], %[[VAL_43]] : index
+// CHECK: %[[VAL_46:.*]] = arith.select %[[VAL_45]], %[[VAL_44]], %[[VAL_43]] : index
// CHECK: %[[VAL_47:.*]] = arith.cmpi eq, %[[VAL_43]], %[[VAL_46]] : index
// CHECK: %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_46]] : index
// CHECK: %[[VAL_49:.*]] = arith.andi %[[VAL_47]], %[[VAL_48]] : i1
@@ -131,10 +131,10 @@ func @matmul1(%a: tensor<10x20xf32, #DCSR>,
// CHECK: }
// CHECK: %[[VAL_69:.*]] = arith.cmpi eq, %[[VAL_43]], %[[VAL_46]] : index
// CHECK: %[[VAL_70:.*]] = arith.addi %[[VAL_40]], %[[VAL_4]] : index
-// CHECK: %[[VAL_71:.*]] = select %[[VAL_69]], %[[VAL_70]], %[[VAL_40]] : index
+// CHECK: %[[VAL_71:.*]] = arith.select %[[VAL_69]], %[[VAL_70]], %[[VAL_40]] : index
// CHECK: %[[VAL_72:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_46]] : index
// CHECK: %[[VAL_73:.*]] = arith.addi %[[VAL_41]], %[[VAL_4]] : index
-// CHECK: %[[VAL_74:.*]] = select %[[VAL_72]], %[[VAL_73]], %[[VAL_41]] : index
+// CHECK: %[[VAL_74:.*]] = arith.select %[[VAL_72]], %[[VAL_73]], %[[VAL_41]] : index
// CHECK: scf.yield %[[VAL_71]], %[[VAL_74]], %[[VAL_75:.*]] : index, index, index
// CHECK: }
// CHECK: sparse_tensor.compress %[[VAL_8]], %[[VAL_19]], %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_76:.*]]#2 : tensor<4x4xf64, #sparse_tensor.encoding<{{{.*}}}>>, memref<?xindex>, memref<?xf64>, memref<?xi1>, memref<?xindex>, index
diff --git a/mlir/test/Dialect/SparseTensor/sparse_out.mlir b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
index 652ba3e3a587a..13a984b8e8d25 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_out.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_out.mlir
@@ -192,7 +192,7 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_34]]] : memref<?xindex>
// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_35]]] : memref<?xindex>
// CHECK: %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_36]] : index
-// CHECK: %[[VAL_39:.*]] = select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_39:.*]] = arith.select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
// CHECK: memref.store %[[VAL_39]], %[[VAL_23]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_40:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
// CHECK: %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
@@ -214,7 +214,7 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
// CHECK: %[[VAL_57:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_55]]] : memref<?xindex>
// CHECK: %[[VAL_58:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_56]]] : memref<?xindex>
// CHECK: %[[VAL_59:.*]] = arith.cmpi ult, %[[VAL_58]], %[[VAL_57]] : index
-// CHECK: %[[VAL_60:.*]] = select %[[VAL_59]], %[[VAL_58]], %[[VAL_57]] : index
+// CHECK: %[[VAL_60:.*]] = arith.select %[[VAL_59]], %[[VAL_58]], %[[VAL_57]] : index
// CHECK: memref.store %[[VAL_60]], %[[VAL_23]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_61:.*]] = arith.cmpi eq, %[[VAL_57]], %[[VAL_60]] : index
// CHECK: %[[VAL_62:.*]] = arith.cmpi eq, %[[VAL_58]], %[[VAL_60]] : index
@@ -236,7 +236,7 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
// CHECK: %[[VAL_80:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_77]]] : memref<?xindex>
// CHECK: %[[VAL_81:.*]] = memref.load %[[VAL_21]]{{\[}}%[[VAL_78]]] : memref<?xindex>
// CHECK: %[[VAL_82:.*]] = arith.cmpi ult, %[[VAL_81]], %[[VAL_80]] : index
-// CHECK: %[[VAL_83:.*]] = select %[[VAL_82]], %[[VAL_81]], %[[VAL_80]] : index
+// CHECK: %[[VAL_83:.*]] = arith.select %[[VAL_82]], %[[VAL_81]], %[[VAL_80]] : index
// CHECK: memref.store %[[VAL_83]], %[[VAL_23]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_84:.*]] = arith.cmpi eq, %[[VAL_80]], %[[VAL_83]] : index
// CHECK: %[[VAL_85:.*]] = arith.cmpi eq, %[[VAL_81]], %[[VAL_83]] : index
@@ -252,10 +252,10 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
// CHECK: }
// CHECK: %[[VAL_92:.*]] = arith.cmpi eq, %[[VAL_80]], %[[VAL_83]] : index
// CHECK: %[[VAL_93:.*]] = arith.addi %[[VAL_77]], %[[VAL_3]] : index
-// CHECK: %[[VAL_94:.*]] = select %[[VAL_92]], %[[VAL_93]], %[[VAL_77]] : index
+// CHECK: %[[VAL_94:.*]] = arith.select %[[VAL_92]], %[[VAL_93]], %[[VAL_77]] : index
// CHECK: %[[VAL_95:.*]] = arith.cmpi eq, %[[VAL_81]], %[[VAL_83]] : index
// CHECK: %[[VAL_96:.*]] = arith.addi %[[VAL_78]], %[[VAL_3]] : index
-// CHECK: %[[VAL_97:.*]] = select %[[VAL_95]], %[[VAL_96]], %[[VAL_78]] : index
+// CHECK: %[[VAL_97:.*]] = arith.select %[[VAL_95]], %[[VAL_96]], %[[VAL_78]] : index
// CHECK: scf.yield %[[VAL_94]], %[[VAL_97]], %[[VAL_98:.*]] : index, index, i32
// CHECK: }
// CHECK: sparse_tensor.lex_insert %[[VAL_8]], %[[VAL_23]], %[[VAL_99:.*]]#2 : tensor<?x?xi32, #{{.*}}>, memref<?xindex>, i32
@@ -263,20 +263,20 @@ func @sparse_truly_dynamic(%arga: tensor<10x20xf32, #CSR>) -> tensor<10x20xf32,
// CHECK: }
// CHECK: %[[VAL_100:.*]] = arith.cmpi eq, %[[VAL_57]], %[[VAL_60]] : index
// CHECK: %[[VAL_101:.*]] = arith.addi %[[VAL_55]], %[[VAL_3]] : index
-// CHECK: %[[VAL_102:.*]] = select %[[VAL_100]], %[[VAL_101]], %[[VAL_55]] : index
+// CHECK: %[[VAL_102:.*]] = arith.select %[[VAL_100]], %[[VAL_101]], %[[VAL_55]] : index
// CHECK: %[[VAL_103:.*]] = arith.cmpi eq, %[[VAL_58]], %[[VAL_60]] : index
// CHECK: %[[VAL_104:.*]] = arith.addi %[[VAL_56]], %[[VAL_3]] : index
-// CHECK: %[[VAL_105:.*]] = select %[[VAL_103]], %[[VAL_104]], %[[VAL_56]] : index
+// CHECK: %[[VAL_105:.*]] = arith.select %[[VAL_103]], %[[VAL_104]], %[[VAL_56]] : index
// CHECK: scf.yield %[[VAL_102]], %[[VAL_105]] : index, index
// CHECK: }
// CHECK: } else {
// CHECK: }
// CHECK: %[[VAL_106:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
// CHECK: %[[VAL_107:.*]] = arith.addi %[[VAL_34]], %[[VAL_3]] : index
-// CHECK: %[[VAL_108:.*]] = select %[[VAL_106]], %[[VAL_107]], %[[VAL_34]] : index
+// CHECK: %[[VAL_108:.*]] = arith.select %[[VAL_106]], %[[VAL_107]], %[[VAL_34]] : index
// CHECK: %[[VAL_109:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
// CHECK: %[[VAL_110:.*]] = arith.addi %[[VAL_35]], %[[VAL_3]] : index
-// CHECK: %[[VAL_111:.*]] = select %[[VAL_109]], %[[VAL_110]], %[[VAL_35]] : index
+// CHECK: %[[VAL_111:.*]] = arith.select %[[VAL_109]], %[[VAL_110]], %[[VAL_35]] : index
// CHECK: scf.yield %[[VAL_108]], %[[VAL_111]] : index, index
// CHECK: }
// CHECK: %[[VAL_112:.*]] = sparse_tensor.load %[[VAL_8]] hasInserts : tensor<?x?xi32, #{{.*}}>
@@ -354,7 +354,7 @@ func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
// CHECK: %[[VAL_44:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_41]]] : memref<?xindex>
// CHECK: %[[VAL_45:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_42]]] : memref<?xindex>
// CHECK: %[[VAL_46:.*]] = arith.cmpi ult, %[[VAL_45]], %[[VAL_44]] : index
-// CHECK: %[[VAL_47:.*]] = select %[[VAL_46]], %[[VAL_45]], %[[VAL_44]] : index
+// CHECK: %[[VAL_47:.*]] = arith.select %[[VAL_46]], %[[VAL_45]], %[[VAL_44]] : index
// CHECK: %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index
// CHECK: %[[VAL_49:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index
// CHECK: %[[VAL_50:.*]] = arith.andi %[[VAL_48]], %[[VAL_49]] : i1
@@ -388,10 +388,10 @@ func @sumred(%arga: tensor<?x?x?xi32, #SparseTensor>,
// CHECK: }
// CHECK: %[[VAL_70:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index
// CHECK: %[[VAL_71:.*]] = arith.addi %[[VAL_41]], %[[VAL_3]] : index
-// CHECK: %[[VAL_72:.*]] = select %[[VAL_70]], %[[VAL_71]], %[[VAL_41]] : index
+// CHECK: %[[VAL_72:.*]] = arith.select %[[VAL_70]], %[[VAL_71]], %[[VAL_41]] : index
// CHECK: %[[VAL_73:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index
// CHECK: %[[VAL_74:.*]] = arith.addi %[[VAL_42]], %[[VAL_3]] : index
-// CHECK: %[[VAL_75:.*]] = select %[[VAL_73]], %[[VAL_74]], %[[VAL_42]] : index
+// CHECK: %[[VAL_75:.*]] = arith.select %[[VAL_73]], %[[VAL_74]], %[[VAL_42]] : index
// CHECK: scf.yield %[[VAL_72]], %[[VAL_75]], %[[VAL_76:.*]] : index, index, index
// CHECK: }
// CHECK: sparse_tensor.compress %[[VAL_9]], %[[VAL_20]], %[[VAL_25]], %[[VAL_26]], %[[VAL_27]], %[[VAL_77:.*]]#2 : tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>, memref<?xindex>, memref<?xf32>, memref<?xi1>, memref<?xindex>, index
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
index 4480e6b0e0f6b..dc55580d78fe6 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir
@@ -51,7 +51,7 @@
// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_33]]] : memref<?xindex>
// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_34]]] : memref<?xindex>
// CHECK: %[[VAL_38:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_36]] : index
-// CHECK: %[[VAL_39:.*]] = select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
+// CHECK: %[[VAL_39:.*]] = arith.select %[[VAL_38]], %[[VAL_37]], %[[VAL_36]] : index
// CHECK: %[[VAL_40:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
// CHECK: %[[VAL_41:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
// CHECK: %[[VAL_42:.*]] = arith.andi %[[VAL_40]], %[[VAL_41]] : i1
@@ -82,10 +82,10 @@
// CHECK: }
// CHECK: %[[VAL_58:.*]] = arith.cmpi eq, %[[VAL_36]], %[[VAL_39]] : index
// CHECK: %[[VAL_59:.*]] = arith.addi %[[VAL_33]], %[[VAL_8]] : index
-// CHECK: %[[VAL_60:.*]] = select %[[VAL_58]], %[[VAL_59]], %[[VAL_33]] : index
+// CHECK: %[[VAL_60:.*]] = arith.select %[[VAL_58]], %[[VAL_59]], %[[VAL_33]] : index
// CHECK: %[[VAL_61:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_39]] : index
// CHECK: %[[VAL_62:.*]] = arith.addi %[[VAL_34]], %[[VAL_8]] : index
-// CHECK: %[[VAL_63:.*]] = select %[[VAL_61]], %[[VAL_62]], %[[VAL_34]] : index
+// CHECK: %[[VAL_63:.*]] = arith.select %[[VAL_61]], %[[VAL_62]], %[[VAL_34]] : index
// CHECK: scf.yield %[[VAL_60]], %[[VAL_63]], %[[VAL_64:.*]] : index, index, f64
// CHECK: }
// CHECK: %[[VAL_65:.*]] = vector.insertelement %[[VAL_66:.*]]#2, %[[VAL_3]]{{\[}}%[[VAL_6]] : index] : vector<8xf64>
@@ -94,7 +94,7 @@
// CHECK: %[[VAL_71:.*]] = vector.create_mask %[[VAL_70]] : vector<8xi1>
// CHECK: %[[VAL_72:.*]] = vector.maskedload %[[VAL_11]]{{\[}}%[[VAL_68]]], %[[VAL_71]], %[[VAL_3]] : memref<?xf64>, vector<8xi1>, vector<8xf64> into vector<8xf64>
// CHECK: %[[VAL_73:.*]] = arith.addf %[[VAL_69]], %[[VAL_72]] : vector<8xf64>
-// CHECK: %[[VAL_74:.*]] = select %[[VAL_71]], %[[VAL_73]], %[[VAL_69]] : vector<8xi1>, vector<8xf64>
+// CHECK: %[[VAL_74:.*]] = arith.select %[[VAL_71]], %[[VAL_73]], %[[VAL_69]] : vector<8xi1>, vector<8xf64>
// CHECK: scf.yield %[[VAL_74]] : vector<8xf64>
// CHECK: }
// CHECK: %[[VAL_75:.*]] = scf.for %[[VAL_76:.*]] = %[[VAL_66]]#1 to %[[VAL_25]] step %[[VAL_4]] iter_args(%[[VAL_77:.*]] = %[[VAL_78:.*]]) -> (vector<8xf64>) {
@@ -102,7 +102,7 @@
// CHECK: %[[VAL_80:.*]] = vector.create_mask %[[VAL_79]] : vector<8xi1>
// CHECK: %[[VAL_81:.*]] = vector.maskedload %[[VAL_14]]{{\[}}%[[VAL_76]]], %[[VAL_80]], %[[VAL_3]] : memref<?xf64>, vector<8xi1>, vector<8xf64> into vector<8xf64>
// CHECK: %[[VAL_82:.*]] = arith.addf %[[VAL_77]], %[[VAL_81]] : vector<8xf64>
-// CHECK: %[[VAL_83:.*]] = select %[[VAL_80]], %[[VAL_82]], %[[VAL_77]] : vector<8xi1>, vector<8xf64>
+// CHECK: %[[VAL_83:.*]] = arith.select %[[VAL_80]], %[[VAL_82]], %[[VAL_77]] : vector<8xi1>, vector<8xf64>
// CHECK: scf.yield %[[VAL_83]] : vector<8xf64>
// CHECK: }
// CHECK: %[[VAL_84:.*]] = vector.reduction "add", %[[VAL_85:.*]] : vector<8xf64> into f64
diff --git a/mlir/test/Dialect/Standard/bufferize.mlir b/mlir/test/Dialect/Standard/bufferize.mlir
deleted file mode 100644
index c7d194b52cda9..0000000000000
--- a/mlir/test/Dialect/Standard/bufferize.mlir
+++ /dev/null
@@ -1,15 +0,0 @@
-// RUN: mlir-opt %s -std-bufferize | FileCheck %s
-
-// CHECK-LABEL: func @select(
-// CHECK-SAME: %[[PRED:.*]]: i1,
-// CHECK-SAME: %[[TRUE_VAL:.*]]: tensor<f32>,
-// CHECK-SAME: %[[FALSE_VAL:.*]]: tensor<f32>) -> tensor<f32> {
-// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : memref<f32>
-// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : memref<f32>
-// CHECK: %[[RET_MEMREF:.*]] = select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref<f32>
-// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref<f32>
-// CHECK: return %[[RET]] : tensor<f32>
-func @select(%arg0: i1, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
- %0 = select %arg0, %arg1, %arg2 : tensor<f32>
- return %0 : tensor<f32>
-}
diff --git a/mlir/test/Dialect/Standard/canonicalize-cf.mlir b/mlir/test/Dialect/Standard/canonicalize-cf.mlir
index 5ed6fab6efcbb..61ebaf742374f 100644
--- a/mlir/test/Dialect/Standard/canonicalize-cf.mlir
+++ b/mlir/test/Dialect/Standard/canonicalize-cf.mlir
@@ -73,8 +73,8 @@ func @cond_br_same_successor(%cond : i1, %a : i32) {
func @cond_br_same_successor_insert_select(
%cond : i1, %a : i32, %b : i32, %c : tensor<2xi32>, %d : tensor<2xi32>
) -> (i32, tensor<2xi32>) {
- // CHECK: %[[RES:.*]] = select %[[COND]], %[[ARG0]], %[[ARG1]]
- // CHECK: %[[RES2:.*]] = select %[[COND]], %[[ARG2]], %[[ARG3]]
+ // CHECK: %[[RES:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG1]]
+ // CHECK: %[[RES2:.*]] = arith.select %[[COND]], %[[ARG2]], %[[ARG3]]
// CHECK: return %[[RES]], %[[RES2]]
cond_br %cond, ^bb1(%a, %c : i32, tensor<2xi32>), ^bb1(%b, %d : i32, tensor<2xi32>)
@@ -105,8 +105,8 @@ func @cond_br_and_br_folding(%a : i32) {
// CHECK-LABEL: func @cond_br_passthrough(
// CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32, %[[COND:.*]]: i1
func @cond_br_passthrough(%arg0 : i32, %arg1 : i32, %arg2 : i32, %cond : i1) -> (i32, i32) {
- // CHECK: %[[RES:.*]] = select %[[COND]], %[[ARG0]], %[[ARG2]]
- // CHECK: %[[RES2:.*]] = select %[[COND]], %[[ARG1]], %[[ARG2]]
+ // CHECK: %[[RES:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG2]]
+ // CHECK: %[[RES2:.*]] = arith.select %[[COND]], %[[ARG1]], %[[ARG2]]
// CHECK: return %[[RES]], %[[RES2]]
cond_br %cond, ^bb1(%arg0 : i32), ^bb2(%arg2, %arg2 : i32, i32)
diff --git a/mlir/test/Dialect/Standard/canonicalize.mlir b/mlir/test/Dialect/Standard/canonicalize.mlir
index 67d95ce0d194c..2a548774fd118 100644
--- a/mlir/test/Dialect/Standard/canonicalize.mlir
+++ b/mlir/test/Dialect/Standard/canonicalize.mlir
@@ -3,7 +3,7 @@
// CHECK-LABEL: @select_same_val
// CHECK: return %arg1
func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
- %0 = select %arg0, %arg1, %arg1 : i64
+ %0 = arith.select %arg0, %arg1, %arg1 : i64
return %0 : i64
}
@@ -13,7 +13,7 @@ func @select_same_val(%arg0: i1, %arg1: i64) -> i64 {
// CHECK: return %arg1
func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi eq, %arg0, %arg1 : i64
- %1 = select %0, %arg0, %arg1 : i64
+ %1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
}
@@ -23,7 +23,7 @@ func @select_cmp_eq_select(%arg0: i64, %arg1: i64) -> i64 {
// CHECK: return %arg0
func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
%0 = arith.cmpi ne, %arg0, %arg1 : i64
- %1 = select %0, %arg0, %arg1 : i64
+ %1 = arith.select %0, %arg0, %arg1 : i64
return %1 : i64
}
@@ -35,7 +35,7 @@ func @select_cmp_ne_select(%arg0: i64, %arg1: i64) -> i64 {
func @select_extui(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
- %res = select %arg0, %c1_i64, %c0_i64 : i64
+ %res = arith.select %arg0, %c1_i64, %c0_i64 : i64
return %res : i64
}
@@ -47,7 +47,7 @@ func @select_extui(%arg0: i1) -> i64 {
func @select_extui2(%arg0: i1) -> i64 {
%c0_i64 = arith.constant 0 : i64
%c1_i64 = arith.constant 1 : i64
- %res = select %arg0, %c0_i64, %c1_i64 : i64
+ %res = arith.select %arg0, %c0_i64, %c1_i64 : i64
return %res : i64
}
@@ -58,7 +58,7 @@ func @select_extui2(%arg0: i1) -> i64 {
func @select_extui_i1(%arg0: i1) -> i1 {
%c0_i1 = arith.constant false
%c1_i1 = arith.constant true
- %res = select %arg0, %c1_i1, %c0_i1 : i1
+ %res = arith.select %arg0, %c1_i1, %c0_i1 : i1
return %res : i1
}
@@ -93,7 +93,7 @@ func @branchCondProp(%arg0: i1) {
func @selToNot(%arg0: i1) -> i1 {
%true = arith.constant true
%false = arith.constant false
- %res = select %arg0, %false, %true : i1
+ %res = arith.select %arg0, %false, %true : i1
return %res : i1
}
@@ -105,6 +105,6 @@ func @selToNot(%arg0: i1) -> i1 {
// CHECK-NEXT: %[[res:.+]] = arith.ori %[[condtrue]], %[[condfalse]] : i1
// CHECK: return %[[res]]
func @selToArith(%arg0: i1, %arg1 : i1, %arg2 : i1) -> i1 {
- %res = select %arg0, %arg1, %arg2 : i1
+ %res = arith.select %arg0, %arg1, %arg2 : i1
return %res : i1
}
diff --git a/mlir/test/Dialect/Standard/expand-tanh.mlir b/mlir/test/Dialect/Standard/expand-tanh.mlir
index 4f809b71bd54e..20e5410ffb705 100644
--- a/mlir/test/Dialect/Standard/expand-tanh.mlir
+++ b/mlir/test/Dialect/Standard/expand-tanh.mlir
@@ -19,5 +19,5 @@ func @tanh(%arg: f32) -> f32 {
// CHECK: %[[DIVISOR2:.+]] = arith.addf %[[EXP2]], %[[ONE]] : f32
// CHECK: %[[RES2:.+]] = arith.divf %[[DIVIDEND2]], %[[DIVISOR2]] : f32
// CHECK: %[[COND:.+]] = arith.cmpf oge, %arg0, %[[ZERO]] : f32
-// CHECK: %[[RESULT:.+]] = select %[[COND]], %[[RES1]], %[[RES2]] : f32
+// CHECK: %[[RESULT:.+]] = arith.select %[[COND]], %[[RES1]], %[[RES2]] : f32
// CHECK: return %[[RESULT]]
diff --git a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
index 1ed83adac71d0..dc45ecb24a631 100644
--- a/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-contract-transforms.mlir
@@ -825,10 +825,10 @@ func @genbool_var_1d(%arg0: index) -> vector<3xi1> {
// CHECK: %[[c1:.*]] = arith.constant 1 : index
// CHECK: %[[T0:.*]] = vector.create_mask %[[B]] : vector<3xi1>
// CHECK: %[[T1:.*]] = arith.cmpi slt, %[[c0]], %[[A]] : index
-// CHECK: %[[T2:.*]] = select %[[T1]], %[[T0]], %[[C1]] : vector<3xi1>
+// CHECK: %[[T2:.*]] = arith.select %[[T1]], %[[T0]], %[[C1]] : vector<3xi1>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C2]] [0] : vector<3xi1> into vector<2x3xi1>
// CHECK: %[[T4:.*]] = arith.cmpi slt, %[[c1]], %[[A]] : index
-// CHECK: %[[T5:.*]] = select %[[T4]], %[[T0]], %[[C1]] : vector<3xi1>
+// CHECK: %[[T5:.*]] = arith.select %[[T4]], %[[T0]], %[[C1]] : vector<3xi1>
// CHECK: %[[T6:.*]] = vector.insert %[[T5]], %[[T3]] [1] : vector<3xi1> into vector<2x3xi1>
// CHECK: return %[[T6]] : vector<2x3xi1>
@@ -848,13 +848,13 @@ func @genbool_var_2d(%arg0: index, %arg1: index) -> vector<2x3xi1> {
// CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index
// CHECK: %[[T0:.*]] = vector.create_mask %[[C]] : vector<7xi1>
// CHECK: %[[T1:.*]] = arith.cmpi slt, %[[c0]], %[[B]] : index
-// CHECK: %[[T2:.*]] = select %[[T1]], %[[T0]], %[[C1]] : vector<7xi1>
+// CHECK: %[[T2:.*]] = arith.select %[[T1]], %[[T0]], %[[C1]] : vector<7xi1>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C2]] [0] : vector<7xi1> into vector<1x7xi1>
// CHECK: %[[T4:.*]] = arith.cmpi slt, %[[c0]], %[[A]] : index
-// CHECK: %[[T5:.*]] = select %[[T4]], %[[T3]], %[[C2]] : vector<1x7xi1>
+// CHECK: %[[T5:.*]] = arith.select %[[T4]], %[[T3]], %[[C2]] : vector<1x7xi1>
// CHECK: %[[T6:.*]] = vector.insert %[[T5]], %[[C3]] [0] : vector<1x7xi1> into vector<2x1x7xi1>
// CHECK: %[[T7:.*]] = arith.cmpi slt, %[[c1]], %[[A]] : index
-// CHECK: %[[T8:.*]] = select %[[T7]], %[[T3]], %[[C2]] : vector<1x7xi1>
+// CHECK: %[[T8:.*]] = arith.select %[[T7]], %[[T3]], %[[C2]] : vector<1x7xi1>
// CHECK: %[[T9:.*]] = vector.insert %[[T8]], %[[T6]] [1] : vector<1x7xi1> into vector<2x1x7xi1>
// CHECK: return %[[T9]] : vector<2x1x7xi1>
diff --git a/mlir/test/Dialect/Vector/vector-transforms.mlir b/mlir/test/Dialect/Vector/vector-transforms.mlir
index 96c35210d8fd7..97d7316a55721 100644
--- a/mlir/test/Dialect/Vector/vector-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-transforms.mlir
@@ -355,10 +355,10 @@ func @cancelling_shape_cast_ops(%arg0 : vector<2x4xf32>) -> vector<2x4xf32> {
// CHECK: %[[VT5:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C2]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT6:.*]] = vector.transfer_read %[[ARG1]][%[[C2]], %[[C0]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
// CHECK: %[[VT7:.*]] = vector.transfer_read %[[ARG1]][%[[C2]], %[[C2]]], {{.*}} : memref<4x4xf32>, vector<2x2xf32>
-// CHECK: %[[SEL0:.*]] = select %[[CMP0]], %[[VT0]], %[[VT4]] : vector<2x2xi1>, vector<2x2xf32>
-// CHECK: %[[SEL1:.*]] = select %[[CMP1]], %[[VT1]], %[[VT5]] : vector<2x2xi1>, vector<2x2xf32>
-// CHECK: %[[SEL2:.*]] = select %[[CMP2]], %[[VT2]], %[[VT6]] : vector<2x2xi1>, vector<2x2xf32>
-// CHECK: %[[SEL3:.*]] = select %[[CMP3]], %[[VT3]], %[[VT7]] : vector<2x2xi1>, vector<2x2xf32>
+// CHECK: %[[SEL0:.*]] = arith.select %[[CMP0]], %[[VT0]], %[[VT4]] : vector<2x2xi1>, vector<2x2xf32>
+// CHECK: %[[SEL1:.*]] = arith.select %[[CMP1]], %[[VT1]], %[[VT5]] : vector<2x2xi1>, vector<2x2xf32>
+// CHECK: %[[SEL2:.*]] = arith.select %[[CMP2]], %[[VT2]], %[[VT6]] : vector<2x2xi1>, vector<2x2xf32>
+// CHECK: %[[SEL3:.*]] = arith.select %[[CMP3]], %[[VT3]], %[[VT7]] : vector<2x2xi1>, vector<2x2xf32>
// CHECK: vector.transfer_write %[[SEL0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<2x2xf32>, memref<4x4xf32>
// CHECK: vector.transfer_write %[[SEL1]], %[[ARG0]][%[[C0]], %[[C2]]] {{.*}} : vector<2x2xf32>, memref<4x4xf32>
// CHECK: vector.transfer_write %[[SEL2]], %[[ARG0]][%[[C2]], %[[C0]]] {{.*}} : vector<2x2xf32>, memref<4x4xf32>
@@ -372,7 +372,7 @@ func @elementwise_unroll(%arg0 : memref<4x4xf32>, %arg1 : memref<4x4xf32>) {
// Vector transfer split pattern only support single user right now.
%2 = vector.transfer_read %arg0[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
%3 = vector.transfer_read %arg1[%c0, %c0], %cf0 : memref<4x4xf32>, vector<4x4xf32>
- %4 = select %cond, %2, %3 : vector<4x4xi1>, vector<4x4xf32>
+ %4 = arith.select %cond, %2, %3 : vector<4x4xi1>, vector<4x4xf32>
vector.transfer_write %4, %arg0[%c0, %c0] : vector<4x4xf32>, memref<4x4xf32>
return
}
@@ -510,12 +510,12 @@ func @cast_away_elementwise_leading_one_dims(
// CHECK: vector.extract %{{.*}}[0] : vector<1x4xf32>
// CHECK: select %{{.*}}, %{{.*}}, %{{.*}} : vector<4xi1>, vector<4xf32>
// CHECK: vector.broadcast %{{.*}} : vector<4xf32> to vector<1x4xf32>
- %2 = select %1, %arg3, %arg2 : vector<1x4xi1>, vector<1x4xf32>
+ %2 = arith.select %1, %arg3, %arg2 : vector<1x4xi1>, vector<1x4xf32>
// CHECK: vector.extract %{{.*}}[0] : vector<1x4xf32>
// CHECK: vector.extract %{{.*}}[0] : vector<1x4xf32>
// CHECK: select %arg4, %12, %{{.*}} : vector<4xf32>
// CHECK: vector.broadcast %{{.*}} : vector<4xf32> to vector<1x4xf32>
- %3 = select %arg4, %arg3, %arg2 : vector<1x4xf32>
+ %3 = arith.select %arg4, %arg3, %arg2 : vector<1x4xf32>
return %0, %1, %2, %3: vector<1x1x8xf32>, vector<1x4xi1>, vector<1x4xf32>, vector<1x4xf32>
}
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 5f00a71c26ff7..fefe7387f284a 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -61,20 +61,20 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
%tci1 = arith.constant dense<1> : tensor<42xi1>
%vci1 = arith.constant dense<1> : vector<42xi1>
- // CHECK: %{{.*}} = select %{{.*}}, %arg3, %arg3 : index
- %21 = select %true, %idx, %idx : index
+ // CHECK: %{{.*}} = arith.select %{{.*}}, %arg3, %arg3 : index
+ %21 = arith.select %true, %idx, %idx : index
- // CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : tensor<42xi1>, tensor<42xi32>
- %22 = select %tci1, %tci32, %tci32 : tensor<42 x i1>, tensor<42 x i32>
+ // CHECK: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : tensor<42xi1>, tensor<42xi32>
+ %22 = arith.select %tci1, %tci32, %tci32 : tensor<42 x i1>, tensor<42 x i32>
- // CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : vector<42xi1>, vector<42xi32>
- %23 = select %vci1, %vci32, %vci32 : vector<42 x i1>, vector<42 x i32>
+ // CHECK: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : vector<42xi1>, vector<42xi32>
+ %23 = arith.select %vci1, %vci32, %vci32 : vector<42 x i1>, vector<42 x i32>
- // CHECK: %{{.*}} = select %{{.*}}, %arg3, %arg3 : index
- %24 = "std.select"(%true, %idx, %idx) : (i1, index, index) -> index
+ // CHECK: %{{.*}} = arith.select %{{.*}}, %arg3, %arg3 : index
+ %24 = "arith.select"(%true, %idx, %idx) : (i1, index, index) -> index
- // CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : tensor<42xi32>
- %25 = std.select %true, %tci32, %tci32 : tensor<42 x i32>
+ // CHECK: %{{.*}} = arith.select %{{.*}}, %{{.*}}, %{{.*}} : tensor<42xi32>
+ %25 = arith.select %true, %tci32, %tci32 : tensor<42 x i32>
%64 = arith.constant dense<0.> : vector<4 x f32>
%tcf32 = arith.constant dense<0.> : tensor<42 x f32>
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 037373d1e3346..8415126f42fcf 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -55,7 +55,7 @@ func @func_with_ops(i32, i32, i32) {
^bb0(%cond : i32, %t : i32, %f : i32):
// expected-error @+2 {{different type than prior uses}}
// expected-note @-2 {{prior use here}}
- %r = select %cond, %t, %f : i32
+ %r = arith.select %cond, %t, %f : i32
}
// -----
@@ -63,7 +63,7 @@ func @func_with_ops(i32, i32, i32) {
func @func_with_ops(i32, i32, i32) {
^bb0(%cond : i32, %t : i32, %f : i32):
// expected-error @+1 {{op operand #0 must be bool-like}}
- %r = "std.select"(%cond, %t, %f) : (i32, i32, i32) -> i32
+ %r = "arith.select"(%cond, %t, %f) : (i32, i32, i32) -> i32
}
// -----
@@ -75,7 +75,7 @@ func @func_with_ops(i1, i32, i64) {
// message. In final state the error should refer to mismatch in true_value and
// false_value.
// expected-error @+1 {{type}}
- %r = "std.select"(%cond, %t, %f) : (i1, i32, i64) -> i32
+ %r = "arith.select"(%cond, %t, %f) : (i1, i32, i64) -> i32
}
// -----
@@ -83,7 +83,7 @@ func @func_with_ops(i1, i32, i64) {
func @func_with_ops(vector<12xi1>, vector<42xi32>, vector<42xi32>) {
^bb0(%cond : vector<12xi1>, %t : vector<42xi32>, %f : vector<42xi32>):
// expected-error @+1 {{all non-scalar operands/results must have the same shape and base type}}
- %r = "std.select"(%cond, %t, %f) : (vector<12xi1>, vector<42xi32>, vector<42xi32>) -> vector<42xi32>
+ %r = "arith.select"(%cond, %t, %f) : (vector<12xi1>, vector<42xi32>, vector<42xi32>) -> vector<42xi32>
}
// -----
@@ -91,7 +91,7 @@ func @func_with_ops(vector<12xi1>, vector<42xi32>, vector<42xi32>) {
func @func_with_ops(tensor<12xi1>, tensor<42xi32>, tensor<42xi32>) {
^bb0(%cond : tensor<12xi1>, %t : tensor<42xi32>, %f : tensor<42xi32>):
// expected-error @+1 {{all non-scalar operands/results must have the same shape and base type}}
- %r = "std.select"(%cond, %t, %f) : (tensor<12xi1>, tensor<42xi32>, tensor<42xi32>) -> tensor<42xi32>
+ %r = "arith.select"(%cond, %t, %f) : (tensor<12xi1>, tensor<42xi32>, tensor<42xi32>) -> tensor<42xi32>
}
// -----
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
index 8edeed690af48..580b6e0f1a4e9 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize \
// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation -convert-linalg-to-llvm \
// RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
index 1041497ce0ab7..86b5ed962b2ff 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-elementwise-to-linalg -std-bufferize \
+// RUN: mlir-opt %s -convert-elementwise-to-linalg \
// RUN: -arith-bufferize -linalg-bufferize -tensor-bufferize \
// RUN: -func-bufferize -buffer-deallocation -convert-linalg-to-loops \
// RUN: -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
index a81e7f5a9368e..794365af222c4 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize \
// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation -convert-linalg-to-llvm \
// RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
index d71dc53fea097..3734e9ce18d4e 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize \
// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation \
// RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
index 4e21495c58030..bb17ff2f07bda 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize \
// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation \
// RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
index c22cbc5f6ab19..fd9778cf3b79f 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize \
// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \
// RUN: -finalizing-bufferize -buffer-deallocation \
// RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
index 360717f75223b..714db859ac560 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -arith-bufferize -std-bufferize -linalg-bufferize \
+// RUN: mlir-opt %s -arith-bufferize -linalg-bufferize \
// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops \
// RUN: -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
index 8de3403b1ec09..37023e1622fdd 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir
@@ -1,5 +1,5 @@
// UNSUPPORTED: asan
-// RUN: mlir-opt %s -linalg-bufferize -std-bufferize -arith-bufferize \
+// RUN: mlir-opt %s -linalg-bufferize -arith-bufferize \
// RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops -convert-scf-to-std \
// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
@@ -7,7 +7,7 @@
// RUN: | FileCheck %s
// RUN: mlir-opt %s -linalg-tile="tile-sizes=1,2,3" -linalg-bufferize \
-// RUN: -scf-bufferize -std-bufferize -arith-bufferize -tensor-bufferize \
+// RUN: -scf-bufferize -arith-bufferize -tensor-bufferize \
// RUN: -func-bufferize \
// RUN: -finalizing-bufferize -convert-linalg-to-loops -convert-scf-to-std -convert-scf-to-std \
// RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
index ccc87af7287cb..7181c1766425c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py
@@ -130,7 +130,7 @@ def __init__(self, options: str):
f'convert-scf-to-std,'
f'func-bufferize,'
f'arith-bufferize,'
- f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
+ f'builtin.func(tensor-bufferize,finalizing-bufferize),'
f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
f'lower-affine,'
f'convert-memref-to-llvm,'
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
index 3c94581c30c41..f6617545003b9 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py
@@ -120,7 +120,7 @@ def __init__(self, options: str):
f'convert-scf-to-std,'
f'func-bufferize,'
f'arith-bufferize,'
- f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
+ f'builtin.func(tensor-bufferize,finalizing-bufferize),'
f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
f'lower-affine,'
f'convert-memref-to-llvm,'
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
index ae12394207133..c8c52736fea70 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py
@@ -72,7 +72,7 @@ def __init__(self):
f'convert-scf-to-std,'
f'func-bufferize,'
f'arith-bufferize,'
- f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
+ f'builtin.func(tensor-bufferize,finalizing-bufferize),'
f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
f'lower-affine,'
f'convert-memref-to-llvm,'
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index c2635c58b7313..4375f5ed3e835 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -80,7 +80,7 @@ def __init__(self):
f'convert-scf-to-std,'
f'func-bufferize,'
f'arith-bufferize,'
- f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
+ f'builtin.func(tensor-bufferize,finalizing-bufferize),'
f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
f'lower-affine,'
f'convert-memref-to-llvm,'
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
index 761620c5b0715..d8c2f904d4b55 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py
@@ -178,7 +178,7 @@ def __init__(self, sparsification_options: str, support_lib: str):
f'convert-scf-to-std,'
f'func-bufferize,'
f'arith-bufferize,'
- f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),'
+ f'builtin.func(tensor-bufferize,finalizing-bufferize),'
f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},'
f'lower-affine,'
f'convert-memref-to-llvm,'
diff --git a/mlir/test/Transforms/canonicalize-block-merge.mlir b/mlir/test/Transforms/canonicalize-block-merge.mlir
index ccc18165ba47c..acaf13957bafd 100644
--- a/mlir/test/Transforms/canonicalize-block-merge.mlir
+++ b/mlir/test/Transforms/canonicalize-block-merge.mlir
@@ -55,7 +55,7 @@ func @mismatch_unknown_terminator(%arg0 : i32, %arg1 : i32) -> i32 {
// CHECK-LABEL: func @mismatch_operands
// CHECK-SAME: %[[COND:.*]]: i1, %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32
func @mismatch_operands(%cond : i1, %arg0 : i32, %arg1 : i32) -> i32 {
- // CHECK: %[[RES:.*]] = select %[[COND]], %[[ARG0]], %[[ARG1]]
+ // CHECK: %[[RES:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG1]]
// CHECK: return %[[RES]]
cond_br %cond, ^bb1, ^bb2
@@ -71,8 +71,8 @@ func @mismatch_operands(%cond : i1, %arg0 : i32, %arg1 : i32) -> i32 {
// CHECK-LABEL: func @mismatch_operands_matching_arguments(
// CHECK-SAME: %[[COND:.*]]: i1, %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32
func @mismatch_operands_matching_arguments(%cond : i1, %arg0 : i32, %arg1 : i32) -> (i32, i32) {
- // CHECK: %[[RES0:.*]] = select %[[COND]], %[[ARG1]], %[[ARG0]]
- // CHECK: %[[RES1:.*]] = select %[[COND]], %[[ARG0]], %[[ARG1]]
+ // CHECK: %[[RES0:.*]] = arith.select %[[COND]], %[[ARG1]], %[[ARG0]]
+ // CHECK: %[[RES1:.*]] = arith.select %[[COND]], %[[ARG0]], %[[ARG1]]
// CHECK: return %[[RES1]], %[[RES0]]
cond_br %cond, ^bb1(%arg1 : i32), ^bb2(%arg0 : i32)
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index e1111bd2ab3c9..d32ee1a16d941 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -602,14 +602,14 @@ func @lowered_affine_mod() -> (index, index) {
%c0 = arith.constant 0 : index
%1 = arith.cmpi slt, %0, %c0 : index
%2 = arith.addi %0, %c42 : index
- %3 = select %1, %2, %0 : index
+ %3 = arith.select %1, %2, %0 : index
%c43 = arith.constant 43 : index
%c42_0 = arith.constant 42 : index
%4 = arith.remsi %c43, %c42_0 : index
%c0_1 = arith.constant 0 : index
%5 = arith.cmpi slt, %4, %c0_1 : index
%6 = arith.addi %4, %c42_0 : index
- %7 = select %5, %6, %4 : index
+ %7 = arith.select %5, %6, %4 : index
return %3, %7 : index, index
}
@@ -628,20 +628,20 @@ func @lowered_affine_floordiv() -> (index, index) {
%c-1 = arith.constant -1 : index
%0 = arith.cmpi slt, %c-43, %c0 : index
%1 = arith.subi %c-1, %c-43 : index
- %2 = select %0, %1, %c-43 : index
+ %2 = arith.select %0, %1, %c-43 : index
%3 = arith.divsi %2, %c42 : index
%4 = arith.subi %c-1, %3 : index
- %5 = select %0, %4, %3 : index
+ %5 = arith.select %0, %4, %3 : index
%c43 = arith.constant 43 : index
%c42_0 = arith.constant 42 : index
%c0_1 = arith.constant 0 : index
%c-1_2 = arith.constant -1 : index
%6 = arith.cmpi slt, %c43, %c0_1 : index
%7 = arith.subi %c-1_2, %c43 : index
- %8 = select %6, %7, %c43 : index
+ %8 = arith.select %6, %7, %c43 : index
%9 = arith.divsi %8, %c42_0 : index
%10 = arith.subi %c-1_2, %9 : index
- %11 = select %6, %10, %9 : index
+ %11 = arith.select %6, %10, %9 : index
return %5, %11 : index, index
}
@@ -660,11 +660,11 @@ func @lowered_affine_ceildiv() -> (index, index) {
%0 = arith.cmpi sle, %c-43, %c0 : index
%1 = arith.subi %c0, %c-43 : index
%2 = arith.subi %c-43, %c1 : index
- %3 = select %0, %1, %2 : index
+ %3 = arith.select %0, %1, %2 : index
%4 = arith.divsi %3, %c42 : index
%5 = arith.subi %c0, %4 : index
%6 = arith.addi %4, %c1 : index
- %7 = select %0, %5, %6 : index
+ %7 = arith.select %0, %5, %6 : index
// CHECK-DAG: %c2 = arith.constant 2 : index
%c43 = arith.constant 43 : index
%c42_0 = arith.constant 42 : index
@@ -673,11 +673,11 @@ func @lowered_affine_ceildiv() -> (index, index) {
%8 = arith.cmpi sle, %c43, %c0_1 : index
%9 = arith.subi %c0_1, %c43 : index
%10 = arith.subi %c43, %c1_2 : index
- %11 = select %8, %9, %10 : index
+ %11 = arith.select %8, %9, %10 : index
%12 = arith.divsi %11, %c42_0 : index
%13 = arith.subi %c0_1, %12 : index
%14 = arith.addi %12, %c1_2 : index
- %15 = select %8, %13, %14 : index
+ %15 = arith.select %8, %13, %14 : index
// CHECK-NEXT: return %c-1, %c2
return %7, %15 : index, index
diff --git a/mlir/test/Transforms/parametric-tiling.mlir b/mlir/test/Transforms/parametric-tiling.mlir
index 7ae4c0e162a13..2c6ff1de649f1 100644
--- a/mlir/test/Transforms/parametric-tiling.mlir
+++ b/mlir/test/Transforms/parametric-tiling.mlir
@@ -41,11 +41,11 @@ func @rectangular(%arg0: memref<?x?xf32>) {
// Upper bound for the inner loop min(%i + %step, %c44).
// COMMON: %[[stepped:.*]] = arith.addi %[[i]], %[[step]]
// COMMON-NEXT: arith.cmpi slt, %c44, %[[stepped]]
- // COMMON-NEXT: %[[ub:.*]] = select {{.*}}, %c44, %[[stepped]]
+ // COMMON-NEXT: %[[ub:.*]] = arith.select {{.*}}, %c44, %[[stepped]]
//
// TILE_74: %[[stepped2:.*]] = arith.addi %[[j]], %[[step2]]
// TILE_74-NEXT: arith.cmpi slt, %c44, %[[stepped2]]
- // TILE_74-NEXT: %[[ub2:.*]] = select {{.*}}, %c44, %[[stepped2]]
+ // TILE_74-NEXT: %[[ub2:.*]] = arith.select {{.*}}, %c44, %[[stepped2]]
// Created inner scf.
// COMMON:scf.for %[[ii:.*]] = %[[i]] to %[[ub:.*]] step %c1
@@ -109,10 +109,10 @@ func @triangular(%arg0: memref<?x?xf32>) {
// Upper bound for the inner loop min(%i + %step, %c44).
// COMMON: %[[stepped:.*]] = arith.addi %[[i]], %[[step]]
// COMMON-NEXT: arith.cmpi slt, %c44, %[[stepped]]
- // COMMON-NEXT: %[[ub:.*]] = select {{.*}}, %c44, %[[stepped]]
+ // COMMON-NEXT: %[[ub:.*]] = arith.select {{.*}}, %c44, %[[stepped]]
// TILE_74: %[[stepped2:.*]] = arith.addi %[[j]], %[[step2]]
// TILE_74-NEXT: arith.cmpi slt, %[[i]], %[[stepped2]]
- // TILE_74-NEXT: %[[ub2:.*]] = select {{.*}}, %[[i]], %[[stepped2]]
+ // TILE_74-NEXT: %[[ub2:.*]] = arith.select {{.*}}, %[[i]], %[[stepped2]]
//
// Created inner scf.
// COMMON:scf.for %[[ii:.*]] = %[[i]] to %[[ub:.*]] step %c1
diff --git a/mlir/test/Transforms/sccp-callgraph.mlir b/mlir/test/Transforms/sccp-callgraph.mlir
index 552c73de51c07..7bec5dcb5d8a3 100644
--- a/mlir/test/Transforms/sccp-callgraph.mlir
+++ b/mlir/test/Transforms/sccp-callgraph.mlir
@@ -262,11 +262,11 @@ func @non_symbol_defining_callable() -> i32 {
// CHECK-LABEL: func private @unreferenced_private_function
func private @unreferenced_private_function() -> i32 {
- // CHECK: %[[RES:.*]] = select
+ // CHECK: %[[RES:.*]] = arith.select
// CHECK: return %[[RES]] : i32
%true = arith.constant true
%cst0 = arith.constant 0 : i32
%cst1 = arith.constant 1 : i32
- %result = select %true, %cst0, %cst1 : i32
+ %result = arith.select %true, %cst0, %cst1 : i32
return %result : i32
}
diff --git a/mlir/test/Transforms/sccp.mlir b/mlir/test/Transforms/sccp.mlir
index 4dccc62673435..3ff1b526fc525 100644
--- a/mlir/test/Transforms/sccp.mlir
+++ b/mlir/test/Transforms/sccp.mlir
@@ -9,7 +9,7 @@ func @no_control_flow(%arg0: i32) -> i32 {
%cond = arith.constant true
%cst_1 = arith.constant 1 : i32
- %select = select %cond, %cst_1, %arg0 : i32
+ %select = arith.select %cond, %cst_1, %arg0 : i32
return %select : i32
}
diff --git a/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp b/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp
index c98f0949d58c2..230a412cd2c2c 100644
--- a/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp
@@ -22,7 +22,6 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SCF/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
-#include "mlir/Dialect/StandardOps/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
@@ -62,7 +61,6 @@ struct TestComprehensiveFunctionBufferize
arith::registerBufferizableOpInterfaceExternalModels(registry);
linalg_ext::registerBufferizableOpInterfaceExternalModels(registry);
scf::registerBufferizableOpInterfaceExternalModels(registry);
- mlir::registerBufferizableOpInterfaceExternalModels(registry);
tensor::registerBufferizableOpInterfaceExternalModels(registry);
vector::registerBufferizableOpInterfaceExternalModels(registry);
}
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index b644064b032b9..1791768da811d 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -65,7 +65,7 @@ struct TestVectorToVectorLowering
private:
// Return the target shape based on op type.
static Optional<SmallVector<int64_t, 4>> getShape(Operation *op) {
- if (isa<arith::AddFOp, SelectOp, arith::CmpFOp>(op))
+ if (isa<arith::AddFOp, arith::SelectOp, arith::CmpFOp>(op))
return SmallVector<int64_t, 4>(2, 2);
if (isa<vector::ContractionOp>(op))
return SmallVector<int64_t, 4>(3, 2);
@@ -96,8 +96,8 @@ struct TestVectorToVectorLowering
}
static LogicalResult filter(Operation *op) {
- return success(isa<arith::AddFOp, SelectOp, arith::CmpFOp, ContractionOp,
- TransferReadOp, TransferWriteOp>(op));
+ return success(isa<arith::AddFOp, arith::SelectOp, arith::CmpFOp,
+ ContractionOp, TransferReadOp, TransferWriteOp>(op));
}
};
More information about the Mlir-commits
mailing list