[llvm] r366747 - [NFC][PhaseOrdering] Add tests showcasing the problems of unsigned multiply overflow check

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 22 15:08:35 PDT 2019


Author: lebedevri
Date: Mon Jul 22 15:08:35 2019
New Revision: 366747

URL: http://llvm.org/viewvc/llvm-project?rev=366747&view=rev
Log:
[NFC][PhaseOrdering] Add tests showcasing the problems of unsigned multiply overflow check

While we can form the @llvm.umul.with.overflow intrinsic easily,
we are still left with the check that was guarding against div-by-0.
And in the second case we won't even flatten the CFG.
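
For reference, the form the pipeline would ideally produce for the first
test is a single call to @llvm.umul.with.overflow.i64, with neither the
udiv nor the guarding branch left behind. A minimal sketch of that
hypothetical output follows; the intrinsic name is real, but the function
name and its exact shape are invented for illustration:

    declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)

    ; Hypothetical ideal result: the guarded-udiv pattern collapses into
    ; the intrinsic, and the i1 overflow bit replaces the phi entirely.
    define i1 @will_not_overflow_ideal(i64 %size, i64 %nmemb) {
      %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
      %ov = extractvalue { i64, i1 } %res, 1
      ret i1 %ov
    }

Note that when %size is 0 the product is 0 and the overflow bit is false,
which is exactly what the phi produces along the %bb edge, so the explicit
div-by-0 guard is redundant in the folded form.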

Added:
    llvm/trunk/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll

Added: llvm/trunk/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll?rev=366747&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll (added)
+++ llvm/trunk/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll Mon Jul 22 15:08:35 2019
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,SIMPLIFYCFG
+; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINEONLY
+; RUN: opt -instcombine -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,BOTH
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+; #include <limits>
+; #include <cstdint>
+;
+; using size_type = std::size_t;
+; bool will_not_overflow(size_type size, size_type nmemb) {
+;   return (size != 0 && (nmemb > std::numeric_limits<size_type>::max() / size));
+; }
+
+define i1 @will_not_overflow(i64 %arg, i64 %arg1) {
+; ALL-LABEL: @will_not_overflow(
+; ALL-NEXT:  bb:
+; ALL-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; ALL-NEXT:    br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; ALL:       bb2:
+; ALL-NEXT:    [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; ALL-NEXT:    [[T4:%.*]] = icmp ult i64 [[T3]], [[ARG1:%.*]]
+; ALL-NEXT:    br label [[BB5]]
+; ALL:       bb5:
+; ALL-NEXT:    [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; ALL-NEXT:    ret i1 [[T6]]
+;
+bb:
+  %t0 = icmp eq i64 %arg, 0
+  br i1 %t0, label %bb5, label %bb2
+
+bb2:                                              ; preds = %bb
+  %t3 = udiv i64 -1, %arg
+  %t4 = icmp ult i64 %t3, %arg1
+  br label %bb5
+
+bb5:                                              ; preds = %bb2, %bb
+  %t6 = phi i1 [ false, %bb ], [ %t4, %bb2 ]
+  ret i1 %t6
+}
+
+; Same as @will_not_overflow, but with the return value inverted.
+
+define i1 @will_overflow(i64 %arg, i64 %arg1) {
+; SIMPLIFYCFG-LABEL: @will_overflow(
+; SIMPLIFYCFG-NEXT:  bb:
+; SIMPLIFYCFG-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; SIMPLIFYCFG-NEXT:    br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; SIMPLIFYCFG:       bb2:
+; SIMPLIFYCFG-NEXT:    [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; SIMPLIFYCFG-NEXT:    [[T4:%.*]] = icmp ult i64 [[T3]], [[ARG1:%.*]]
+; SIMPLIFYCFG-NEXT:    br label [[BB5]]
+; SIMPLIFYCFG:       bb5:
+; SIMPLIFYCFG-NEXT:    [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; SIMPLIFYCFG-NEXT:    [[T7:%.*]] = xor i1 [[T6]], true
+; SIMPLIFYCFG-NEXT:    ret i1 [[T7]]
+;
+; INSTCOMBINE-LABEL: @will_overflow(
+; INSTCOMBINE-NEXT:  bb:
+; INSTCOMBINE-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
+; INSTCOMBINE-NEXT:    br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
+; INSTCOMBINE:       bb2:
+; INSTCOMBINE-NEXT:    [[T3:%.*]] = udiv i64 -1, [[ARG]]
+; INSTCOMBINE-NEXT:    [[T4:%.*]] = icmp uge i64 [[T3]], [[ARG1:%.*]]
+; INSTCOMBINE-NEXT:    br label [[BB5]]
+; INSTCOMBINE:       bb5:
+; INSTCOMBINE-NEXT:    [[T6:%.*]] = phi i1 [ true, [[BB:%.*]] ], [ [[T4]], [[BB2]] ]
+; INSTCOMBINE-NEXT:    ret i1 [[T6]]
+;
+bb:
+  %t0 = icmp eq i64 %arg, 0
+  br i1 %t0, label %bb5, label %bb2
+
+bb2:                                              ; preds = %bb
+  %t3 = udiv i64 -1, %arg
+  %t4 = icmp ult i64 %t3, %arg1
+  br label %bb5
+
+bb5:                                              ; preds = %bb2, %bb
+  %t6 = phi i1 [ false, %bb ], [ %t4, %bb2 ]
+  %t7 = xor i1 %t6, true
+  ret i1 %t7
+}
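
The @will_overflow variant should fold the same way, just with the overflow
bit inverted to absorb the trailing xor. A sketch under the same assumptions
(function name again hypothetical):

    define i1 @will_overflow_ideal(i64 %size, i64 %nmemb) {
      %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
      %ov = extractvalue { i64, i1 } %res, 1
      ; Inverted: returns true when the product does NOT overflow.
      %not.ov = xor i1 %ov, true
      ret i1 %not.ov
    }

As the checks above show, none of the RUN lines currently gets there:
instcombine only sinks the xor into the compare (ult -> uge) and the phi,
and even -instcombine -simplifycfg leaves the two-block CFG in place.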
