[llvm] r355206 - [InstCombine] add tests for add+umin/umax canonicalization; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 1 09:29:10 PST 2019


Author: spatel
Date: Fri Mar  1 09:29:10 2019
New Revision: 355206

URL: http://llvm.org/viewvc/llvm-project?rev=355206&view=rev
Log:
[InstCombine] add tests for add+umin/umax canonicalization; NFC

Fixing this should solve most of the vector problems seen in:
https://bugs.llvm.org/show_bug.cgi?id=14613
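
The canonicalization these tests are meant to exercise is expected to move the
'add nuw' after the umin/umax so the result can match saturating patterns.
A minimal sketch with assumed constants (this commit only adds tests; the
rewritten form below is an assumption, not asserted here):

  ; before (select-based umin of an add)
  %a = add nuw i32 %x, 15
  %c = icmp ult i32 %a, 42
  %r = select i1 %c, i32 %a, i32 42

  ; possible canonical form: umin(%x, 42 - 15) + 15
  %c = icmp ult i32 %x, 27
  %m = select i1 %c, i32 %x, i32 27
  %r = add nuw i32 %m, 15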

Modified:
    llvm/trunk/test/Transforms/InstCombine/minmax-fold.ll

Modified: llvm/trunk/test/Transforms/InstCombine/minmax-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/minmax-fold.ll?rev=355206&r1=355205&r2=355206&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/minmax-fold.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/minmax-fold.ll Fri Mar  1 09:29:10 2019
@@ -907,3 +907,230 @@ define float @not_min_of_min(i8 %i, floa
   ret float %r
 }
 
+define i32 @add_umin(i32 %x) {
+; CHECK-LABEL: @add_umin(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 15
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+define i32 @add_umin_constant_limit(i32 %x) {
+; CHECK-LABEL: @add_umin_constant_limit(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 41
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 41
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+; TODO: assert that instsimplify always gets this?
+
+define i32 @add_umin_simplify(i32 %x) {
+; CHECK-LABEL: @add_umin_simplify(
+; CHECK-NEXT:    ret i32 42
+;
+  %a = add nuw i32 %x, 42
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+; TODO: assert that instsimplify always gets this?
+
+define i32 @add_umin_simplify2(i32 %x) {
+; CHECK-LABEL: @add_umin_simplify2(
+; CHECK-NEXT:    ret i32 42
+;
+  %a = add nuw i32 %x, 43
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umin_wrong_pred(i32 %x) {
+; CHECK-LABEL: @add_umin_wrong_pred(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C:%.*]] = icmp slt i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 15
+  %c = icmp slt i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umin_wrong_wrap(i32 %x) {
+; CHECK-LABEL: @add_umin_wrong_wrap(
+; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nsw i32 %x, 15
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umin_extra_use(i32 %x, i32* %p) {
+; CHECK-LABEL: @add_umin_extra_use(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 15
+; CHECK-NEXT:    store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 15
+  store i32 %a, i32* %p
+  %c = icmp ult i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+define <2 x i16> @add_umin_vec(<2 x i16> %x) {
+; CHECK-LABEL: @add_umin_vec(
+; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i16> [[X:%.*]], <i16 15, i16 15>
+; CHECK-NEXT:    [[C:%.*]] = icmp ult <2 x i16> [[A]], <i16 240, i16 240>
+; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[C]], <2 x i16> [[A]], <2 x i16> <i16 240, i16 240>
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %a = add nuw <2 x i16> %x, <i16 15, i16 15>
+  %c = icmp ult <2 x i16> %a, <i16 240, i16 240>
+  %r = select <2 x i1> %c, <2 x i16> %a, <2 x i16> <i16 240, i16 240>
+  ret <2 x i16> %r
+}
+
+define i37 @add_umax(i37 %x) {
+; CHECK-LABEL: @add_umax(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i37 [[X:%.*]], 5
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i37 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i37 [[A]], i37 42
+; CHECK-NEXT:    ret i37 [[R]]
+;
+  %a = add nuw i37 %x, 5
+  %c = icmp ugt i37 %a, 42
+  %r = select i1 %c, i37 %a, i37 42
+  ret i37 %r
+}
+
+define i37 @add_umax_constant_limit(i37 %x) {
+; CHECK-LABEL: @add_umax_constant_limit(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i37 [[X:%.*]], 81
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i37 [[A]], 82
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i37 [[A]], i37 82
+; CHECK-NEXT:    ret i37 [[R]]
+;
+  %a = add nuw i37 %x, 81
+  %c = icmp ugt i37 %a, 82
+  %r = select i1 %c, i37 %a, i37 82
+  ret i37 %r
+}
+
+; Negative test
+; TODO: assert that instsimplify always gets this?
+
+define i37 @add_umax_simplify(i37 %x) {
+; CHECK-LABEL: @add_umax_simplify(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i37 [[X:%.*]], 42
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i37 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i37 [[A]], i37 42
+; CHECK-NEXT:    ret i37 [[R]]
+;
+  %a = add nuw i37 %x, 42
+  %c = icmp ugt i37 %a, 42
+  %r = select i1 %c, i37 %a, i37 42
+  ret i37 %r
+}
+
+; Negative test
+; TODO: assert that instsimplify always gets this?
+
+define i32 @add_umax_simplify2(i32 %x) {
+; CHECK-LABEL: @add_umax_simplify2(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 57
+; CHECK-NEXT:    ret i32 [[A]]
+;
+  %a = add nuw i32 %x, 57
+  %c = icmp ugt i32 %a, 56
+  %r = select i1 %c, i32 %a, i32 56
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umax_wrong_pred(i32 %x) {
+; CHECK-LABEL: @add_umax_wrong_pred(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 15
+  %c = icmp sgt i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umax_wrong_wrap(i32 %x) {
+; CHECK-LABEL: @add_umax_wrong_wrap(
+; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nsw i32 %x, 15
+  %c = icmp ugt i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+; Negative test
+
+define i32 @add_umax_extra_use(i32 %x, i32* %p) {
+; CHECK-LABEL: @add_umax_extra_use(
+; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 15
+; CHECK-NEXT:    store i32 [[A]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[A]], 42
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 42
+; CHECK-NEXT:    ret i32 [[R]]
+;
+  %a = add nuw i32 %x, 15
+  store i32 %a, i32* %p
+  %c = icmp ugt i32 %a, 42
+  %r = select i1 %c, i32 %a, i32 42
+  ret i32 %r
+}
+
+define <2 x i33> @add_umax_vec(<2 x i33> %x) {
+; CHECK-LABEL: @add_umax_vec(
+; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i33> [[X:%.*]], <i33 5, i33 5>
+; CHECK-NEXT:    [[C:%.*]] = icmp ugt <2 x i33> [[A]], <i33 240, i33 240>
+; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[C]], <2 x i33> [[A]], <2 x i33> <i33 240, i33 240>
+; CHECK-NEXT:    ret <2 x i33> [[R]]
+;
+  %a = add nuw <2 x i33> %x, <i33 5, i33 5>
+  %c = icmp ugt <2 x i33> %a, <i33 240, i33 240>
+  %r = select <2 x i1> %c, <2 x i33> %a, <2 x i33> <i33 240, i33 240>
+  ret <2 x i33> %r
+}