[llvm] 44977a1 - Add tests for binops with conditions/assume constraints
Noah Goldstein via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 27 15:50:35 PST 2023
Author: Noah Goldstein
Date: 2023-01-27T17:45:35-06:00
New Revision: 44977a155f24be3cdbcd2a57acbfd6da2529abde
URL: https://github.com/llvm/llvm-project/commit/44977a155f24be3cdbcd2a57acbfd6da2529abde
DIFF: https://github.com/llvm/llvm-project/commit/44977a155f24be3cdbcd2a57acbfd6da2529abde.diff
LOG: Add tests for binops with conditions/assume constraints
Reviewed By: nikic
Differential Revision: https://reviews.llvm.org/D140849
Added:
llvm/test/Transforms/InstCombine/icmp-binop.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/icmp-binop.ll b/llvm/test/Transforms/InstCombine/icmp-binop.ll
new file mode 100644
index 000000000000..1f4a0de2b812
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-binop.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @use64(i64)
+declare void @llvm.assume(i1)
+
+define i1 @mul_unkV_oddC_eq(i32 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_eq(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[V:%.*]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %mul = mul i32 %v, 3
+ %cmp = icmp eq i32 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_unkV_oddC_eq_nonzero(i32 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_eq_nonzero(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[V:%.*]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[MUL]], 4
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %mul = mul i32 %v, 3
+ %cmp = icmp eq i32 %mul, 4
+ ret i1 %cmp
+}
+
+define <2 x i1> @mul_unkV_oddC_ne_vec(<2 x i64> %v) {
+; CHECK-LABEL: @mul_unkV_oddC_ne_vec(
+; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i64> [[V:%.*]], <i64 3, i64 3>
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i64> [[MUL]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %mul = mul <2 x i64> %v, <i64 3, i64 3>
+ %cmp = icmp ne <2 x i64> %mul, <i64 0, i64 0>
+ ret <2 x i1> %cmp
+}
+
+define i1 @mul_assumeoddV_asumeoddV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_assumeoddV_asumeoddV_eq(
+; CHECK-NEXT: [[LB:%.*]] = and i16 [[V:%.*]], 1
+; CHECK-NEXT: [[ODD:%.*]] = icmp ne i16 [[LB]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT: [[LB2:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT: [[ODD2:%.*]] = icmp ne i16 [[LB2]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[ODD2]])
+; CHECK-NEXT: ret i1 true
+;
+ %lb = and i16 %v, 1
+ %odd = icmp ne i16 %lb, 0
+ call void @llvm.assume(i1 %odd)
+ %lb2 = and i16 %v2, 1
+ %odd2 = icmp ne i16 %lb2, 0
+ call void @llvm.assume(i1 %odd2)
+ %mul = mul i16 %v, %v2
+ %cmp = icmp ne i16 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_unkV_oddC_sge(i8 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_sge(
+; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[V:%.*]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[MUL]], -1
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %mul = mul i8 %v, 3
+ %cmp = icmp sge i8 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_reused_unkV_oddC_ne(i64 %v) {
+; CHECK-LABEL: @mul_reused_unkV_oddC_ne(
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[V:%.*]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT: call void @use64(i64 [[MUL]])
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %mul = mul i64 %v, 3
+ %cmp = icmp ne i64 %mul, 0
+ call void @use64(i64 %mul)
+ ret i1 %cmp
+}
+
+define i1 @mul_assumeoddV_unkV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_assumeoddV_unkV_eq(
+; CHECK-NEXT: [[LB:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT: [[ODD:%.*]] = icmp ne i16 [[LB]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i16 [[V:%.*]], [[V2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %lb = and i16 %v2, 1
+ %odd = icmp eq i16 %lb, 1
+ call void @llvm.assume(i1 %odd)
+ %mul = mul i16 %v, %v2
+ %cmp = icmp eq i16 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_reusedassumeoddV_unkV_ne(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_reusedassumeoddV_unkV_ne(
+; CHECK-NEXT: [[LB:%.*]] = and i64 [[V:%.*]], 1
+; CHECK-NEXT: [[ODD:%.*]] = icmp ne i64 [[LB]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[V]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT: call void @use64(i64 [[MUL]])
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %lb = and i64 %v, 1
+ %odd = icmp ne i64 %lb, 0
+ call void @llvm.assume(i1 %odd)
+ %mul = mul i64 %v, %v2
+ %cmp = icmp ne i64 %mul, 0
+ call void @use64(i64 %mul)
+ ret i1 %cmp
+}
+
+define <2 x i1> @mul_setoddV_unkV_ne(<2 x i32> %v1, <2 x i32> %v2) {
+; CHECK-LABEL: @mul_setoddV_unkV_ne(
+; CHECK-NEXT: [[V:%.*]] = or <2 x i32> [[V1:%.*]], <i32 1, i32 1>
+; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i32> [[V]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %v = or <2 x i32> %v1, <i32 1, i32 1>
+ %mul = mul <2 x i32> %v, %v2
+ %cmp = icmp ne <2 x i32> %mul, <i32 0, i32 0>
+ ret <2 x i1> %cmp
+}
+
+define i1 @mul_broddV_unkV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_broddV_unkV_eq(
+; CHECK-NEXT: [[LB:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT: [[ODD_NOT:%.*]] = icmp eq i16 [[LB]], 0
+; CHECK-NEXT: br i1 [[ODD_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
+; CHECK: true:
+; CHECK-NEXT: [[MUL:%.*]] = mul i16 [[V:%.*]], [[V2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK: false:
+; CHECK-NEXT: call void @use64(i16 [[V]])
+; CHECK-NEXT: ret i1 false
+;
+ %lb = and i16 %v2, 1
+ %odd = icmp eq i16 %lb, 1
+ br i1 %odd, label %true, label %false
+true:
+ %mul = mul i16 %v, %v2
+ %cmp = icmp eq i16 %mul, 0
+ ret i1 %cmp
+false:
+ call void @use64(i16 %v)
+ ret i1 false
+}
+
+define i1 @mul_unkV_evenC_ne(i64 %v) {
+; CHECK-LABEL: @mul_unkV_evenC_ne(
+; CHECK-NEXT: [[MUL_MASK:%.*]] = and i64 [[V:%.*]], 4611686018427387903
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[MUL_MASK]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %mul = mul i64 %v, 4
+ %cmp = icmp ne i64 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_assumenzV_asumenzV_eq(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_assumenzV_asumenzV_eq(
+; CHECK-NEXT: [[NZ:%.*]] = icmp ne i64 [[V:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT: [[NZ2:%.*]] = icmp ne i64 [[V2:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[NZ2]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[V]], [[V2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %nz = icmp ne i64 %v, 0
+ call void @llvm.assume(i1 %nz)
+ %nz2 = icmp ne i64 %v2, 0
+ call void @llvm.assume(i1 %nz2)
+ %mul = mul i64 %v, %v2
+ %cmp = icmp eq i64 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_assumenzV_unkV_nsw_ne(i32 %v, i32 %v2) {
+; CHECK-LABEL: @mul_assumenzV_unkV_nsw_ne(
+; CHECK-NEXT: [[NZ:%.*]] = icmp ne i32 [[V:%.*]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[V]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %nz = icmp ne i32 %v, 0
+ call void @llvm.assume(i1 %nz)
+ %mul = mul nsw i32 %v, %v2
+ %cmp = icmp ne i32 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_selectnzV_unkV_nsw_ne(i8 %v, i8 %v2) {
+; CHECK-LABEL: @mul_selectnzV_unkV_nsw_ne(
+; CHECK-NEXT: [[NZ:%.*]] = icmp ne i8 [[V:%.*]], 0
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i8 [[V]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[MUL]], 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[NZ]], i1 [[CMP]], i1 false
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = icmp ne i8 %v, 0
+ %mul = mul nsw i8 %v, %v2
+ %cmp = icmp ne i8 %mul, 0
+ %r = select i1 %nz, i1 %cmp, i1 false
+ ret i1 %r
+}
+
+define <2 x i1> @mul_unkV_unkV_nsw_nuw_ne(<2 x i16> %v, <2 x i16> %v2) {
+; CHECK-LABEL: @mul_unkV_unkV_nsw_nuw_ne(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw <2 x i16> [[V:%.*]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i16> [[MUL]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %mul = mul nuw nsw <2 x i16> %v, %v2
+ %cmp = icmp ne <2 x i16> %mul, <i16 0, i16 0>
+ ret <2 x i1> %cmp
+}
+
+define i1 @mul_setnzV_unkV_nuw_eq(i8 %v1, i8 %v2) {
+; CHECK-LABEL: @mul_setnzV_unkV_nuw_eq(
+; CHECK-NEXT: [[V:%.*]] = or i8 [[V1:%.*]], 2
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i8 [[V]], [[V2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %v = or i8 %v1, 2
+ %mul = mul nuw i8 %v, %v2
+ %cmp = icmp eq i8 %mul, 0
+ ret i1 %cmp
+}
+
+define i1 @mul_brnzV_unkV_nuw_eq(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_brnzV_unkV_nuw_eq(
+; CHECK-NEXT: [[NZ_NOT:%.*]] = icmp eq i64 [[V2:%.*]], 0
+; CHECK-NEXT: br i1 [[NZ_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
+; CHECK: true:
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i64 [[V:%.*]], [[V2]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[MUL]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK: false:
+; CHECK-NEXT: call void @use64(i64 [[V]])
+; CHECK-NEXT: ret i1 false
+;
+ %nz = icmp ne i64 %v2, 0
+ br i1 %nz, label %true, label %false
+true:
+ %mul = mul nuw i64 %v, %v2
+ %cmp = icmp eq i64 %mul, 0
+ ret i1 %cmp
+false:
+ call void @use64(i64 %v)
+ ret i1 false
+}
More information about the llvm-commits mailing list