[llvm] ed91084 - [InstCombine] Test cases for D154565

Dhruv Chawla via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 8 01:04:40 PDT 2023


Author: Dhruv Chawla
Date: 2023-07-08T12:45:15+05:30
New Revision: ed910840906b480fc1cab36b3e73021956dce511

URL: https://github.com/llvm/llvm-project/commit/ed910840906b480fc1cab36b3e73021956dce511
DIFF: https://github.com/llvm/llvm-project/commit/ed910840906b480fc1cab36b3e73021956dce511.diff

LOG: [InstCombine] Test cases for D154565

Create test cases to test the fold for the expression pattern
'uadd_sat(X, C) pred C2'.

Differential Revision: https://reviews.llvm.org/D154566

Added: 
    llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll b/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll
new file mode 100644
index 00000000000000..e5ce97a6e34c67
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+; Tests for InstCombineCompares.cpp::foldICmpUSubSatOrUAddSatWithConstant
+; - uadd_sat case
+
+; ==============================================================================
+; Basic tests with one user
+; ==============================================================================
+define i1 @icmp_eq_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_eq_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
+  %cmp = icmp eq i8 %add, 5
+  ret i1 %cmp
+}
+
+define i1 @icmp_ne_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_ne_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 8)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i16 [[ADD]], 9
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 8)
+  %cmp = icmp ne i16 %add, 9
+  ret i1 %cmp
+}
+
+define i1 @icmp_ule_basic(i32 %arg) {
+; CHECK-LABEL: define i1 @icmp_ule_basic
+; CHECK-SAME: (i32 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 4
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 2)
+  %cmp = icmp ule i32 %add, 3
+  ret i1 %cmp
+}
+
+define i1 @icmp_ult_basic(i64 %arg) {
+; CHECK-LABEL: define i1 @icmp_ult_basic
+; CHECK-SAME: (i64 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 5)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ADD]], 20
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 5)
+  %cmp = icmp ult i64 %add, 20
+  ret i1 %cmp
+}
+
+define i1 @icmp_uge_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_uge_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 4)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[ADD]], 7
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 4)
+  %cmp = icmp uge i8 %add, 8
+  ret i1 %cmp
+}
+
+define i1 @icmp_ugt_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_ugt_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 1)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i16 [[ADD]], 3
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 1)
+  %cmp = icmp ugt i16 %add, 3
+  ret i1 %cmp
+}
+
+define i1 @icmp_sle_basic(i32 %arg) {
+; CHECK-LABEL: define i1 @icmp_sle_basic
+; CHECK-SAME: (i32 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 10)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], 9
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 10)
+  %cmp = icmp sle i32 %add, 8
+  ret i1 %cmp
+}
+
+define i1 @icmp_slt_basic(i64 %arg) {
+; CHECK-LABEL: define i1 @icmp_slt_basic
+; CHECK-SAME: (i64 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 24)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 24)
+  %cmp = icmp slt i64 %add, 5
+  ret i1 %cmp
+}
+
+define i1 @icmp_sge_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_sge_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 1)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[ADD]], 3
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 1)
+  %cmp = icmp sge i8 %add, 4
+  ret i1 %cmp
+}
+
+define i1 @icmp_sgt_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_sgt_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i16 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 2)
+  %cmp = icmp sgt i16 %add, 5
+  ret i1 %cmp
+}
+
+; ==============================================================================
+; Tests with more than one user
+; ==============================================================================
+define i1 @icmp_eq_multiuse(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_eq_multiuse
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
+; CHECK-NEXT:    call void @use.i8(i8 [[ADD]])
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
+  %cmp = icmp eq i8 %add, 5
+  call void @use.i8(i8 %add)
+  ret i1 %cmp
+}
+
+; ==============================================================================
+; Tests with vector types
+; ==============================================================================
+define <2 x i1> @icmp_eq_vector_equal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_equal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_eq_vector_unequal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_unequal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 1, i8 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 6>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 1, i8 2>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 6>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ne_vector_equal(<2 x i16> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_equal
+; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 3, i16 3>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 5, i16 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 3>)
+  %cmp = icmp ne <2 x i16> %add, <i16 5, i16 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ne_vector_unequal(<2 x i16> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_unequal
+; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 3, i16 33>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 7, i16 6>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 33>)
+  %cmp = icmp ne <2 x i16> %add, <i16 7, i16 6>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ule_vector_equal(<2 x i32> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_equal
+; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 3, i32 3>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 5, i32 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 3>)
+  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ule_vector_unequal(<2 x i32> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_unequal
+; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 3, i32 35>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 5, i32 7>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 35>)
+  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 7>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_sgt_vector_equal(<2 x i64> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_equal
+; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 409623, i64 409623>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 1234, i64 1234>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 409623, i64 409623>)
+  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 1234>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_sgt_vector_unequal(<2 x i64> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_unequal
+; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 320498, i64 409623>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 1234, i64 3456>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 320498, i64 409623>)
+  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 3456>
+  ret <2 x i1> %cmp
+}
+
+; ==============================================================================
+; Tests with vector types and multiple uses
+; ==============================================================================
+define <2 x i1> @icmp_eq_vector_multiuse_equal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_multiuse_equal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
+; CHECK-NEXT:    call void @use.v2i8(<2 x i8> [[ADD]])
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
+  call void @use.v2i8(<2 x i8> %add)
+  ret <2 x i1> %cmp
+}
+
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
+
+declare void @use.i8(i8)
+declare void @use.v2i8(<2 x i8>)


        


More information about the llvm-commits mailing list