[Mlir-commits] [mlir] 7e835ae - [mlir][arith] Add `arith.cmpi` support to WIE
Jakub Kuderski
llvmlistbot at llvm.org
Fri Nov 11 12:14:28 PST 2022
Author: Jakub Kuderski
Date: 2022-11-11T15:14:02-05:00
New Revision: 7e835ae57c3628c7168fbdaaa26aa9350dfd4387
URL: https://github.com/llvm/llvm-project/commit/7e835ae57c3628c7168fbdaaa26aa9350dfd4387
DIFF: https://github.com/llvm/llvm-project/commit/7e835ae57c3628c7168fbdaaa26aa9350dfd4387.diff
LOG: [mlir][arith] Add `arith.cmpi` support to WIE
This includes both LIT tests over the emitted IR and runtime checks.
Reviewed By: antiagainst
Differential Revision: https://reviews.llvm.org/D137846
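
For reference, the new ConvertCmpI pattern below decomposes each wide comparison
into comparisons over the low and high halves: `eq` becomes an `and` of the
half-wise results, `ne` an `or`, and every other predicate selects between the
high-half comparison (original predicate) and the low-half comparison (unsigned
predicate) based on whether the high halves are equal. A minimal C++ sketch of
the equivalent scalar logic, assuming an i64 `slt` emulated with 32-bit halves
(names are illustrative only, not code from the pass):

#include <cstdint>

bool emulatedSlt(uint64_t lhsBits, uint64_t rhsBits) {
  // Split each value into an unsigned low half and a signed high half.
  uint32_t lhsLow = static_cast<uint32_t>(lhsBits);
  uint32_t rhsLow = static_cast<uint32_t>(rhsBits);
  int32_t lhsHigh = static_cast<int32_t>(lhsBits >> 32);
  int32_t rhsHigh = static_cast<int32_t>(rhsBits >> 32);

  bool lowCmp = lhsLow < rhsLow;    // unsigned predicate (ult)
  bool highCmp = lhsHigh < rhsHigh; // original predicate (slt)
  bool highEq = lhsHigh == rhsHigh;

  // Mirrors the arith.select built by the pattern: the low-half result
  // decides the outcome only when the high halves tie.
  return highEq ? lowCmp : highCmp;
}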
Added:
mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-cmpi-i16.mlir
Modified:
mlir/lib/Dialect/Arith/Transforms/EmulateWideInt.cpp
mlir/test/Dialect/Arith/emulate-wide-int.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Arith/Transforms/EmulateWideInt.cpp b/mlir/lib/Dialect/Arith/Transforms/EmulateWideInt.cpp
index 6d83bfafafcc7..6a725927eefc5 100644
--- a/mlir/lib/Dialect/Arith/Transforms/EmulateWideInt.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/EmulateWideInt.cpp
@@ -324,6 +324,81 @@ struct ConvertBitwiseBinary final : OpConversionPattern<BinaryOp> {
}
};
+//===----------------------------------------------------------------------===//
+// ConvertCmpI
+//===----------------------------------------------------------------------===//
+
+/// Returns the matching unsigned version of the given predicate `pred`, or the
+/// same predicate if `pred` is not signed.
+static arith::CmpIPredicate toUnsignedPredicate(arith::CmpIPredicate pred) {
+ using P = arith::CmpIPredicate;
+ switch (pred) {
+ case P::sge:
+ return P::uge;
+ case P::sgt:
+ return P::ugt;
+ case P::sle:
+ return P::ule;
+ case P::slt:
+ return P::ult;
+ default:
+ return pred;
+ }
+}
+
+struct ConvertCmpI final : OpConversionPattern<arith::CmpIOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ Location loc = op->getLoc();
+ auto inputTy = getTypeConverter()
+ ->convertType(op.getLhs().getType())
+ .dyn_cast_or_null<VectorType>();
+ if (!inputTy)
+ return rewriter.notifyMatchFailure(
+ loc, llvm::formatv("unsupported type: {0}", op.getType()));
+
+ arith::CmpIPredicate highPred = adaptor.getPredicate();
+ arith::CmpIPredicate lowPred = toUnsignedPredicate(highPred);
+
+ auto [lhsElem0, lhsElem1] =
+ extractLastDimHalves(rewriter, loc, adaptor.getLhs());
+ auto [rhsElem0, rhsElem1] =
+ extractLastDimHalves(rewriter, loc, adaptor.getRhs());
+
+ Value lowCmp =
+ rewriter.create<arith::CmpIOp>(loc, lowPred, lhsElem0, rhsElem0);
+ Value highCmp =
+ rewriter.create<arith::CmpIOp>(loc, highPred, lhsElem1, rhsElem1);
+
+ Value cmpResult{};
+ switch (highPred) {
+ case arith::CmpIPredicate::eq: {
+ cmpResult = rewriter.create<arith::AndIOp>(loc, lowCmp, highCmp);
+ break;
+ }
+ case arith::CmpIPredicate::ne: {
+ cmpResult = rewriter.create<arith::OrIOp>(loc, lowCmp, highCmp);
+ break;
+ }
+ default: {
+ // Handle inequality checks.
+ Value highEq = rewriter.create<arith::CmpIOp>(
+ loc, arith::CmpIPredicate::eq, lhsElem1, rhsElem1);
+ cmpResult =
+ rewriter.create<arith::SelectOp>(loc, highEq, lowCmp, highCmp);
+ break;
+ }
+ }
+
+ assert(cmpResult && "Unhandled case");
+ rewriter.replaceOp(op, dropTrailingX1Dim(rewriter, loc, cmpResult));
+ return success();
+ }
+};
+
//===----------------------------------------------------------------------===//
// ConvertMulI
//===----------------------------------------------------------------------===//
@@ -863,7 +938,7 @@ void arith::populateArithWideIntEmulationPatterns(
// Populate `arith.*` conversion patterns.
patterns.add<
// Misc ops.
- ConvertConstant, ConvertVectorPrint, ConvertSelect,
+ ConvertConstant, ConvertCmpI, ConvertSelect, ConvertVectorPrint,
// Binary ops.
ConvertAddI, ConvertMulI, ConvertShLI, ConvertShRUI,
// Bitwise binary ops.
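
Why the low halves always compare with the unsigned predicate: in a multi-word
integer only the top word carries the sign, while lower words contribute
magnitude only. A worked example, assuming i16 values split into i8 halves
(hex shown for clarity):

  slt 0x80FF, 0x8001
    high bytes: 0x80 == 0x80, so the select takes the low-half result
    low bytes (unsigned): 0xFF (255) ult 0x01 (1) -> false
    native check: 0x80FF = -32513, 0x8001 = -32767; -32513 slt -32767 -> false

Had the low bytes compared as signed instead (0xFF as -1, so -1 slt 1 -> true),
the emulated result would be wrong.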
diff --git a/mlir/test/Dialect/Arith/emulate-wide-int.mlir b/mlir/test/Dialect/Arith/emulate-wide-int.mlir
index b09aac099f9c5..3356542b30404 100644
--- a/mlir/test/Dialect/Arith/emulate-wide-int.mlir
+++ b/mlir/test/Dialect/Arith/emulate-wide-int.mlir
@@ -130,6 +130,177 @@ func.func @addi_vector_a_b(%a : vector<4xi64>, %b : vector<4xi64>) -> vector<4xi
return %x : vector<4xi64>
}
+// CHECK-LABEL: func.func @cmpi_eq_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK-NEXT: [[LHSLOW:%.+]] = vector.extract [[LHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[LHSHIGH:%.+]] = vector.extract [[LHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[RHSLOW:%.+]] = vector.extract [[RHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[RHSHIGH:%.+]] = vector.extract [[RHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[CLOW:%.+]] = arith.cmpi eq, [[LHSLOW]], [[RHSLOW]] : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.andi [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_eq_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi eq, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_eq_vector
+// CHECK-SAME: ([[ARG0:%.+]]: vector<3x2xi32>, [[ARG1:%.+]]: vector<3x2xi32>) -> vector<3xi1>
+// CHECK-NEXT: [[LOW0:%.+]] = vector.extract_strided_slice [[ARG0]] {offsets = [0, 0], sizes = [3, 1], strides = [1, 1]} : vector<3x2xi32> to vector<3x1xi32>
+// CHECK-NEXT: [[HIGH0:%.+]] = vector.extract_strided_slice [[ARG0]] {offsets = [0, 1], sizes = [3, 1], strides = [1, 1]} : vector<3x2xi32> to vector<3x1xi32>
+// CHECK-NEXT: [[LOW1:%.+]] = vector.extract_strided_slice [[ARG1]] {offsets = [0, 0], sizes = [3, 1], strides = [1, 1]} : vector<3x2xi32> to vector<3x1xi32>
+// CHECK-NEXT: [[HIGH1:%.+]] = vector.extract_strided_slice [[ARG1]] {offsets = [0, 1], sizes = [3, 1], strides = [1, 1]} : vector<3x2xi32> to vector<3x1xi32>
+// CHECK-NEXT: [[CLOW:%.+]] = arith.cmpi eq, [[LOW0]], [[LOW1]] : vector<3x1xi32>
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi eq, [[HIGH0]], [[HIGH1]] : vector<3x1xi32>
+// CHECK-NEXT: [[RES:%.+]] = arith.andi [[CLOW]], [[CHIGH]] : vector<3x1xi1>
+// CHECK-NEXT: [[CAST:%.+]] = vector.shape_cast [[RES]] : vector<3x1xi1> to vector<3xi1>
+// CHECK: return [[CAST]] : vector<3xi1>
+func.func @cmpi_eq_vector(%a : vector<3xi64>, %b : vector<3xi64>) -> vector<3xi1> {
+ %r = arith.cmpi eq, %a, %b : vector<3xi64>
+ return %r : vector<3xi1>
+}
+
+// CHECK-LABEL: func.func @cmpi_ne_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK-NEXT: [[LHSLOW:%.+]] = vector.extract [[LHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[LHSHIGH:%.+]] = vector.extract [[LHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[RHSLOW:%.+]] = vector.extract [[RHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[RHSHIGH:%.+]] = vector.extract [[RHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[CLOW:%.+]] = arith.cmpi ne, [[LHSLOW]], [[RHSLOW]] : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi ne, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.ori [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_ne_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi ne, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_ne_vector
+// CHECK-SAME: ([[ARG0:%.+]]: vector<3x2xi32>, [[ARG1:%.+]]: vector<3x2xi32>) -> vector<3xi1>
+// CHECK: [[CLOW:%.+]] = arith.cmpi ne, {{%.+}}, {{%.+}} : vector<3x1xi32>
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi ne, {{%.+}}, {{%.+}} : vector<3x1xi32>
+// CHECK-NEXT: [[RES:%.+]] = arith.ori [[CLOW]], [[CHIGH]] : vector<3x1xi1>
+// CHECK-NEXT: [[CAST:%.+]] = vector.shape_cast [[RES]] : vector<3x1xi1> to vector<3xi1>
+// CHECK: return [[CAST]] : vector<3xi1>
+func.func @cmpi_ne_vector(%a : vector<3xi64>, %b : vector<3xi64>) -> vector<3xi1> {
+ %r = arith.cmpi ne, %a, %b : vector<3xi64>
+ return %r : vector<3xi1>
+}
+
+// CHECK-LABEL: func.func @cmpi_sge_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK-NEXT: [[LHSLOW:%.+]] = vector.extract [[LHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[LHSHIGH:%.+]] = vector.extract [[LHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[RHSLOW:%.+]] = vector.extract [[RHS]][0] : vector<2xi32>
+// CHECK-NEXT: [[RHSHIGH:%.+]] = vector.extract [[RHS]][1] : vector<2xi32>
+// CHECK-NEXT: [[CLOW:%.+]] = arith.cmpi uge, [[LHSLOW]], [[RHSLOW]] : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi sge, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_sge_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi sge, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_sge_vector
+// CHECK-SAME: ([[ARG0:%.+]]: vector<3x2xi32>, [[ARG1:%.+]]: vector<3x2xi32>) -> vector<3xi1>
+// CHECK: [[CLOW:%.+]] = arith.cmpi uge, {{%.+}}, {{%.+}} : vector<3x1xi32>
+// CHECK: [[CHIGH:%.+]] = arith.cmpi sge, {{%.+}}, {{%.+}} : vector<3x1xi32>
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, {{%.+}}, {{%.+}} : vector<3x1xi32>
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : vector<3x1xi1>
+// CHECK-NEXT: [[CAST:%.+]] = vector.shape_cast [[RES]] : vector<3x1xi1> to vector<3xi1>
+// CHECK: return [[CAST]] : vector<3xi1>
+func.func @cmpi_sge_vector(%a : vector<3xi64>, %b : vector<3xi64>) -> vector<3xi1> {
+ %r = arith.cmpi sge, %a, %b : vector<3xi64>
+ return %r : vector<3xi1>
+}
+
+// CHECK-LABEL: func.func @cmpi_sgt_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ugt, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi sgt, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_sgt_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi sgt, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_sle_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ule, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi sle, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_sle_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi sle, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_slt_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ult, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi slt, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_slt_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi slt, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_uge_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi uge, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi uge, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_uge_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi uge, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_ugt_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ugt, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi ugt, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_ugt_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi ugt, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_ule_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ule, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi ule, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_ule_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi ule, %a, %b : i64
+ return %r : i1
+}
+
+// CHECK-LABEL: func.func @cmpi_ult_scalar
+// CHECK-SAME: ([[LHS:%.+]]: vector<2xi32>, [[RHS:%.+]]: vector<2xi32>)
+// CHECK: [[CLOW:%.+]] = arith.cmpi ult, {{%.+}}, {{%.+}} : i32
+// CHECK-NEXT: [[CHIGH:%.+]] = arith.cmpi ult, [[LHSHIGH:%.+]], [[RHSHIGH:%.+]] : i32
+// CHECK-NEXT: [[HIGHEQ:%.+]] = arith.cmpi eq, [[LHSHIGH]], [[RHSHIGH]] : i32
+// CHECK-NEXT: [[RES:%.+]] = arith.select [[HIGHEQ]], [[CLOW]], [[CHIGH]] : i1
+// CHECK: return [[RES]] : i1
+func.func @cmpi_ult_scalar(%a : i64, %b : i64) -> i1 {
+ %r = arith.cmpi ult, %a, %b : i64
+ return %r : i1
+}
+
// CHECK-LABEL: func @extsi_scalar
// CHECK-SAME: ([[ARG:%.+]]: i16) -> vector<2xi32>
// CHECK-NEXT: [[EXT:%.+]] = arith.extsi [[ARG]] : i16 to i32
diff --git a/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-cmpi-i16.mlir b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-cmpi-i16.mlir
new file mode 100644
index 0000000000000..2c0cfab65ee4d
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Arith/CPU/test-wide-int-emulation-cmpi-i16.mlir
@@ -0,0 +1,344 @@
+// Check that the wide integer `arith.cmpi` emulation produces the same result as the
+// native wide `arith.cmpi`. Emulate i16 ops with i8 ops.
+// Ops in functions prefixed with `emulate` will be emulated using i8 types.
+
+// RUN: mlir-opt %s --convert-scf-to-cf --convert-cf-to-llvm --convert-vector-to-llvm \
+// RUN: --convert-func-to-llvm --convert-arith-to-llvm | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
+// RUN: --shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s --match-full-lines
+
+// RUN: mlir-opt %s --test-arith-emulate-wide-int="widest-int-supported=8" \
+// RUN: --convert-scf-to-cf --convert-cf-to-llvm --convert-vector-to-llvm \
+// RUN: --convert-func-to-llvm --convert-arith-to-llvm | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
+// RUN: --shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s --match-full-lines
+
+func.func @emulate_cmpi_eq(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi eq, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_eq(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_eq(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_ne(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi ne, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_ne(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_ne(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_sge(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi sge, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_sge(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_sge(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_sgt(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi sgt, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_sgt(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_sgt(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_sle(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi sle, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_sle(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_sle(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_slt(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi slt, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_slt(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_slt(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_uge(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi uge, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_uge(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_uge(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_ugt(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi ugt, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_ugt(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_ugt(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_ule(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi ule, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_ule(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_ule(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @emulate_cmpi_ult(%lhs : i16, %rhs : i16) -> (i1) {
+ %res = arith.cmpi ult, %lhs, %rhs : i16
+ return %res : i1
+}
+
+func.func @check_cmpi_ult(%lhs : i16, %rhs : i16) -> () {
+ %res = func.call @emulate_cmpi_ult(%lhs, %rhs) : (i16, i16) -> (i1)
+ vector.print %res : i1
+ return
+}
+
+func.func @entry() {
+ %cst0 = arith.constant 0 : i16
+ %cst1 = arith.constant 1 : i16
+ %cst7 = arith.constant 7 : i16
+ %cst_n1 = arith.constant -1 : i16
+ %cst1337 = arith.constant 1337 : i16
+ %cst4096 = arith.constant 4096 : i16
+ %cst_i16_min = arith.constant -32768 : i16
+
+ // CHECK: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ func.call @check_cmpi_eq(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_eq(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ func.call @check_cmpi_ne(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_ne(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ func.call @check_cmpi_sge(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_sge(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ func.call @check_cmpi_sgt(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_sgt(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ func.call @check_cmpi_sle(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_sle(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ func.call @check_cmpi_slt(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_slt(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ func.call @check_cmpi_uge(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_uge(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ func.call @check_cmpi_ugt(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_ugt(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 1
+ func.call @check_cmpi_ule(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_ule(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 0
+ // CHECK-NEXT: 1
+ func.call @check_cmpi_ult(%cst0, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst0, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst1, %cst0) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst_n1, %cst1) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst1, %cst_n1) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst_n1, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst1337, %cst1337) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst4096, %cst4096) : (i16, i16) -> ()
+ func.call @check_cmpi_ult(%cst1337, %cst_i16_min) : (i16, i16) -> ()
+
+ return
+}
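
As a standalone sanity check on the boundary expectations above (not part of
the commit), the 1337 vs. i16-min cases differ between the signed and unsigned
predicate groups because -32768 reinterprets as 32768 when treated as unsigned:

#include <cassert>
#include <cstdint>

int main() {
  int16_t a = 1337;
  int16_t b = -32768; // i16 min; 32768 when reinterpreted as uint16_t

  // Matches the final `sge` CHECK value above: 1.
  assert((a >= b) == true);
  // Matches the final `uge` CHECK value above: 0.
  assert((static_cast<uint16_t>(a) >= static_cast<uint16_t>(b)) == false);
  return 0;
}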