[llvm] b0866f6 - [InstCombine] Precommit umul.with.overflow sign check test.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 17 23:47:27 PST 2020


Author: Florian Hahn
Date: 2020-02-18T08:46:50+01:00
New Revision: b0866f61c127e27855b88873503cbcd5c2e49212

URL: https://github.com/llvm/llvm-project/commit/b0866f61c127e27855b88873503cbcd5c2e49212
DIFF: https://github.com/llvm/llvm-project/commit/b0866f61c127e27855b88873503cbcd5c2e49212.diff

LOG: [InstCombine] Precommit umul.with.overflow sign check test.

Precommit tests for D74141.
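
For context, D74141 is expected to fold this weakened overflow check into a
plain multiply plus two zero checks. The key observation: %overflow | (%mul != 0)
is true exactly when both operands are non-zero, since a non-overflowing
product is non-zero iff both operands are, and an overflowing product forces
both operands to be non-zero. A minimal sketch of the expected rewrite
(assuming D74141 lands in this form; the exact output shape is an assumption,
not confirmed by this commit):

    ; before (the pattern exercised by the tests below)
    %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    %overflow = extractvalue { i64, i1 } %res, 1
    %mul = extractvalue { i64, i1 } %res, 0
    %cmp = icmp ne i64 %mul, 0
    %overflow.1 = or i1 %overflow, %cmp

    ; after (hypothetical simplified form)
    %mul = mul i64 %a, %b
    %a.nz = icmp ne i64 %a, 0
    %b.nz = icmp ne i64 %b, 0
    %overflow.1 = and i1 %a.nz, %b.nz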

Added: 
    llvm/test/Transforms/InstCombine/umul-sign-check.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/umul-sign-check.ll b/llvm/test/Transforms/InstCombine/umul-sign-check.ll
new file mode 100644
index 000000000000..808a2f42dc18
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/umul-sign-check.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instcombine -S %s | FileCheck %s
+
+; Check that we simplify llvm.umul.with.overflow when the overflow check is
+; weakened by or-ing it with (icmp ne %mul, 0). This pattern is generated by
+; code using __builtin_mul_overflow with negative integer constants, e.g.:
+
+;   bool test(unsigned long long v, unsigned long long *res) {
+;     return __builtin_mul_overflow(v, -4775807LL, res);
+;   }
+
+declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0
+
+define i1 @test1(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  store i64 %mul, i64* %ptr, align 8
+  ret i1 %overflow.1
+}
+
+define i1 @test1_or_ops_swapped(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test1_or_ops_swapped(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[CMP]], [[OVERFLOW]]
+; CHECK-NEXT:    store i64 [[MUL]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %cmp, %overflow
+  store i64 %mul, i64* %ptr, align 8
+  ret i1 %overflow.1
+}
+
+define i1 @test2(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
+; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  %neg = sub i64 0, %mul
+  store i64 %neg, i64* %ptr, align 8
+  ret i1 %overflow.1
+}
+
+declare void @use(i1)
+
+define i1 @test3_multiple_overflow_users(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test3_multiple_overflow_users(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    call void @use(i1 [[OVERFLOW]])
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  call void @use(i1 %overflow)
+  ret i1 %overflow.1
+}
+
+; Do not simplify if %overflow and %mul have multiple uses.
+define i1 @test3_multiple_overflow_and_mul_users(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test3_multiple_overflow_and_mul_users(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
+; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    call void @use(i1 [[OVERFLOW]])
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  %neg = sub i64 0, %mul
+  store i64 %neg, i64* %ptr, align 8
+  call void @use(i1 %overflow)
+  ret i1 %overflow.1
+}
+
+declare void @use.2({ i64, i1 })
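+; Do not simplify if the result struct %res itself has additional users.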
+define i1 @test3_multiple_res_users(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test3_multiple_res_users(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
+; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    call void @use.2({ i64, i1 } [[RES]])
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  %neg = sub i64 0, %mul
+  store i64 %neg, i64* %ptr, align 8
+  call void @use.2({ i64, i1 } %res)
+  ret i1 %overflow.1
+}
+
+declare void @use.3(i64)
+
+; Simplify if %mul has multiple uses.
+define i1 @test3_multiple_mul_users(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test3_multiple_mul_users(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
+; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    call void @use.3(i64 [[MUL]])
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp ne i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  %neg = sub i64 0, %mul
+  store i64 %neg, i64* %ptr, align 8
+  call void @use.3(i64 %mul)
+  ret i1 %overflow.1
+}
+
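+; Do not simplify if the compare is not (icmp ne %mul, 0); an icmp sgt sign
+; check is not equivalent, so no simplification is expected here.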
+define i1 @test4_no_icmp_ne(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: @test4_no_icmp_ne(
+; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
+; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[MUL]], 0
+; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
+; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
+; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
+; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
+;
+  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+  %overflow = extractvalue { i64, i1 } %res, 1
+  %mul = extractvalue { i64, i1 } %res, 0
+  %cmp = icmp sgt i64 %mul, 0
+  %overflow.1 = or i1 %overflow, %cmp
+  %neg = sub i64 0, %mul
+  store i64 %neg, i64* %ptr, align 8
+  ret i1 %overflow.1
+}
+
+attributes #0 = { nounwind readnone speculatable willreturn }