[llvm] r337859 - [SCEV] Add zext(C + x + ...) -> D + zext(C-D + x + ...)<nuw><nsw> transform

Roman Tereshin via llvm-commits <llvm-commits@lists.llvm.org>
Tue Jul 24 14:48:56 PDT 2018


Author: rtereshin
Date: Tue Jul 24 14:48:56 2018
New Revision: 337859

URL: http://llvm.org/viewvc/llvm-project?rev=337859&view=rev
Log:
[SCEV] Add zext(C + x + ...) -> D + zext(C-D + x + ...)<nuw><nsw> transform

if the top-level addition in (D + (C-D + x + ...)) can be proven not to
wrap, where the choice of D also maximizes the number of trailing zeros
of (C-D + x + ...), ensuring uniform behaviour of the transformation
and better canonicalization of such expressions.

This enables better canonicalization of expressions like

  1 + zext(5 + 20 * %x + 24 * %y)  and
      zext(6 + 20 * %x + 24 * %y)

which are both transformed to

  2 + zext(4 + 20 * %x + 24 * %y)

This pattern is common in address arithmetic, and the transformation
makes it easier for passes like LoadStoreVectorizer to prove that two
or more memory accesses are consecutive and to optimize (vectorize)
them.
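
To make the arithmetic concrete, here is a minimal standalone sketch of
the choice of D (plain uint8_t arithmetic in place of APInt; the helper
name is illustrative, not part of the patch):

  #include <cstdint>
  #include <cstdio>

  // Keep only the low TZ bits of C, where TZ is the minimum number of
  // trailing zeros of the remaining operands; the residual C - D is
  // then divisible by 1 << TZ and merges cleanly with those operands.
  static uint8_t chooseD(uint8_t C, unsigned TZ) {
    return C & ((1u << TZ) - 1);
  }

  int main() {
    unsigned TZ = 2;             // min trailing zeros of (20 * %x), (24 * %y)
    uint8_t D5 = chooseD(5, TZ); // 5 & 3 == 1, outer constant 1 + 1 == 2
    uint8_t D6 = chooseD(6, TZ); // 6 & 3 == 2, outer constant 0 + 2 == 2
    printf("%u %u\n", (unsigned)(uint8_t)(5 - D5),  // residual 4
                      (unsigned)(uint8_t)(6 - D6)); // residual 4
  }

Both constants leave the same residual 4, which is what makes the two
expressions meet at 2 + zext(4 + 20 * %x + 24 * %y).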

Reviewed By: mzolotukhin

Differential Revision: https://reviews.llvm.org/D48853

Added:
    llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
Modified:
    llvm/trunk/lib/Analysis/ScalarEvolution.cpp
    llvm/trunk/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll

Modified: llvm/trunk/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolution.cpp?rev=337859&r1=337858&r2=337859&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolution.cpp Tue Jul 24 14:48:56 2018
@@ -1777,6 +1777,44 @@ ScalarEvolution::getZeroExtendExpr(const
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
+
+    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))<nuw>
+    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
+    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
+    //
+    // Useful while proving that address arithmetic expressions are equal or
+    // differ by a small constant amount; see the LoadStoreVectorizer pass.
+    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
+      // Address arithmetic often contains expressions like
+      // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
+      // ConstantRange is unable to prove that it's possible to transform
+      // (5 + (4 * X)) to (1 + (4 + (4 * X))) w/o underflowing:
+      //
+      // |  Expression   |     ConstantRange      |       KnownBits       |
+      // |---------------|------------------------|-----------------------|
+      // | i8 4 * X      | [L: 0, U: 253)         | XXXX XX00             |
+      // |               |   => Min: 0, Max: 252  |   => Min: 0, Max: 252 |
+      // |               |                        |                       |
+      // | i8 4 * X + 5  | [L: 5, U: 2) (wrapped) | YYYY YY01             |
+      // |         (101) |   => Min: 0, Max: 255  |   => Min: 1, Max: 253 |
+      //
+      // As KnownBits is not available for SCEV expressions, use the number
+      // of trailing zeros instead:
+      APInt C = SC->getAPInt();
+      uint32_t TZ = C.getBitWidth();
+      for (unsigned I = 1, E = SA->getNumOperands(); I < E && TZ; ++I)
+        TZ = std::min(TZ, GetMinTrailingZeros(SA->getOperand(I)));
+      if (TZ) {
+        APInt D = TZ < C.getBitWidth() ? C.trunc(TZ).zext(C.getBitWidth()) : C;
+        if (D != 0) {
+          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+          const SCEV *SResidual =
+              getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
+          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+          return getAddExpr(SZExtD, SZExtR, SCEV::FlagNUW, Depth + 1);
+        }
+      }
+    }
   }
 
   if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
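
For reference, here is the same computation expressed directly against
LLVM's APInt API, mirroring the lines added above (an illustrative
sketch; chooseD is a hypothetical helper, not part of the SCEV code):

  #include "llvm/ADT/APInt.h"

  // Truncating C to TZ bits and zero-extending back keeps only the low
  // TZ bits, so C - D has at least TZ trailing zeros. APInt arithmetic
  // wraps, which is why adding getConstant(-D) to the original SCEV add
  // (as done above) produces the residual (C - D + x + y + ...).
  static llvm::APInt chooseD(const llvm::APInt &C, unsigned TZ) {
    return TZ < C.getBitWidth() ? C.trunc(TZ).zext(C.getBitWidth()) : C;
  }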

Modified: llvm/trunk/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll?rev=337859&r1=337858&r2=337859&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll Tue Jul 24 14:48:56 2018
@@ -120,3 +120,84 @@ define void @f2(i8* %len_addr) {
 
   ret void
 }
+
+@z_addr = external global [16 x i8], align 4
+@z_addr_noalign = external global [16 x i8]
+
+%union = type { [10 x [4 x float]] }
+@tmp_addr = external unnamed_addr global { %union, [2000 x i8] }
+
+define void @f3(i8* %x_addr, i8* %y_addr, i32* %tmp_addr) {
+; CHECK-LABEL: Classifying expressions for: @f3
+ entry:
+  %x = load i8, i8* %x_addr
+  %t0 = mul i8 %x, 4
+  %t1 = add i8 %t0, 5
+  %t1.zext = zext i8 %t1 to i16
+; CHECK:  %t1.zext = zext i8 %t1 to i16
+; CHECK-NEXT:  -->  (1 + (zext i8 (4 + (4 * %x)) to i16))<nuw><nsw> U: [1,254) S: [1,257)
+
+  %q0 = mul i8 %x, 4
+  %q1 = add i8 %q0, 7
+  %q1.zext = zext i8 %q1 to i16
+; CHECK:  %q1.zext = zext i8 %q1 to i16
+; CHECK-NEXT:  -->  (3 + (zext i8 (4 + (4 * %x)) to i16))<nuw><nsw> U: [3,256) S: [3,259)
+
+  %p0 = mul i8 %x, 4
+  %p1 = add i8 %p0, 8
+  %p1.zext = zext i8 %p1 to i16
+; CHECK:  %p1.zext = zext i8 %p1 to i16
+; CHECK-NEXT:  -->  (zext i8 (8 + (4 * %x)) to i16) U: [0,253) S: [0,256)
+
+  %r0 = mul i8 %x, 4
+  %r1 = add i8 %r0, 254
+  %r1.zext = zext i8 %r1 to i16
+; CHECK:  %r1.zext = zext i8 %r1 to i16
+; CHECK-NEXT:  -->  (2 + (zext i8 (-4 + (4 * %x)) to i16))<nuw><nsw> U: [2,255) S: [2,258)
+
+  %y = load i8, i8* %y_addr
+  %s0 = mul i8 %x, 32
+  %s1 = mul i8 %y, 36
+  %s2 = add i8 %s0, %s1
+  %s3 = add i8 %s2, 5
+  %s3.zext = zext i8 %s3 to i16
+; CHECK:  %s3.zext = zext i8 %s3 to i16
+; CHECK-NEXT:  -->  (1 + (zext i8 (4 + (32 * %x) + (36 * %y)) to i16))<nuw><nsw> U: [1,254) S: [1,257)
+
+  %ptr = bitcast [16 x i8]* @z_addr to i8*
+  %int0 = ptrtoint i8* %ptr to i32
+  %int5 = add i32 %int0, 5
+  %int.zext = zext i32 %int5 to i64
+; CHECK:  %int.zext = zext i32 %int5 to i64
+; CHECK-NEXT:  -->  (1 + (zext i32 (4 + %int0) to i64))<nuw><nsw> U: [1,4294967294) S: [1,4294967297)
+
+  %ptr_noalign = bitcast [16 x i8]* @z_addr_noalign to i8*
+  %int0_na = ptrtoint i8* %ptr_noalign to i32
+  %int5_na = add i32 %int0_na, 5
+  %int.zext_na = zext i32 %int5_na to i64
+; CHECK:  %int.zext_na = zext i32 %int5_na to i64
+; CHECK-NEXT:  -->  (zext i32 (5 + %int0_na) to i64) U: [0,4294967296) S: [0,4294967296)
+
+  %tmp = load i32, i32* %tmp_addr
+  %mul = and i32 %tmp, -4
+  %add4 = add i32 %mul, 4
+  %add4.zext = zext i32 %add4 to i64
+  %sunkaddr3 = mul i64 %add4.zext, 4
+  %sunkaddr4 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @tmp_addr to i8*), i64 %sunkaddr3
+  %sunkaddr5 = getelementptr inbounds i8, i8* %sunkaddr4, i64 4096
+  %addr4.cast = bitcast i8* %sunkaddr5 to i32*
+  %addr4.incr = getelementptr i32, i32* %addr4.cast, i64 1
+; CHECK:  %addr4.incr = getelementptr i32, i32* %addr4.cast, i64 1
+; CHECK-NEXT:  -->  ([[C:4100]] + ([[SIZE:4]] * (zext i32 ([[OFFSET:4]] + ([[STRIDE:4]] * (%tmp /u [[STRIDE]]))<nuw>) to i64))<nuw><nsw> + @tmp_addr)
+
+  %add5 = add i32 %mul, 5
+  %add5.zext = zext i32 %add5 to i64
+  %sunkaddr0 = mul i64 %add5.zext, 4
+  %sunkaddr1 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @tmp_addr to i8*), i64 %sunkaddr0
+  %sunkaddr2 = getelementptr inbounds i8, i8* %sunkaddr1, i64 4096
+  %addr5.cast = bitcast i8* %sunkaddr2 to i32*
+; CHECK:  %addr5.cast = bitcast i8* %sunkaddr2 to i32*
+; CHECK-NEXT:  -->  ([[C]]    +   ([[SIZE]]  *  (zext i32 ([[OFFSET]]  +  ([[STRIDE]]  *  (%tmp /u [[STRIDE]]))<nuw>) to i64))<nuw><nsw> + @tmp_addr)
+
+  ret void
+}
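
The %r1 case above is worth a second look: the i8 constant 254 is -2,
yet the transform still yields a small positive outer constant. A quick
check with plain 8-bit arithmetic (same illustrative assumptions as the
sketches above):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint8_t C = 254;                    // i8 254 == -2
    unsigned TZ = 2;                    // from (4 * %x)
    uint8_t D = C & ((1u << TZ) - 1);   // 254 & 3 == 2
    int8_t Residual = (int8_t)(C - D);  // 252, i.e. -4 as a signed i8
    printf("D = %u, residual = %d\n", (unsigned)D, (int)Residual);
  }

which prints D = 2, residual = -4, matching the CHECK line
2 + (zext i8 (-4 + (4 * %x)) to i16).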

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll?rev=337859&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll Tue Jul 24 14:48:56 2018
@@ -0,0 +1,78 @@
+; RUN: opt -codegenprepare -load-store-vectorizer %s -S -o - | FileCheck %s
+; RUN: opt                 -load-store-vectorizer %s -S -o - | FileCheck %s
+
+target triple = "x86_64--"
+
+%union = type { { [4 x [4 x [4 x [16 x float]]]], [4 x [4 x [4 x [16 x float]]]], [10 x [10 x [4 x float]]] } }
+
+@global_pointer = external unnamed_addr global { %union, [2000 x i8] }, align 4
+
+; Function Attrs: convergent nounwind
+define void @test(i32 %base) #0 {
+; CHECK-LABEL: @test(
+; CHECK-NOT: load i32
+; CHECK: load <2 x i32>
+; CHECK-NOT: load i32
+entry:
+  %mul331 = and i32 %base, -4
+  %add350.4 = add i32 4, %mul331
+  %idx351.4 = zext i32 %add350.4 to i64
+  %arrayidx352.4 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.4
+  %tmp296.4 = bitcast float* %arrayidx352.4 to i32*
+  %add350.5 = add i32 5, %mul331
+  %idx351.5 = zext i32 %add350.5 to i64
+  %arrayidx352.5 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.5
+  %tmp296.5 = bitcast float* %arrayidx352.5 to i32*
+  %cnd = icmp ult i32 %base, 1000
+  br i1 %cnd, label %loads, label %exit
+
+loads:
+  ; Only if the loads are in a different BB from the GEPs does codegenprepare
+  ; try to turn the GEPs into math, which makes LoadStoreVectorizer's job
+  ; harder.
+  %tmp297.4 = load i32, i32* %tmp296.4, align 4, !tbaa !0
+  %tmp297.5 = load i32, i32* %tmp296.5, align 4, !tbaa !0
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Function Attrs: convergent nounwind
+define void @test.codegenprepared(i32 %base) #0 {
+; CHECK-LABEL: @test.codegenprepared(
+; CHECK-NOT: load i32
+; CHECK: load <2 x i32>
+; CHECK-NOT: load i32
+entry:
+  %mul331 = and i32 %base, -4
+  %add350.4 = add i32 4, %mul331
+  %idx351.4 = zext i32 %add350.4 to i64
+  %add350.5 = add i32 5, %mul331
+  %idx351.5 = zext i32 %add350.5 to i64
+  %cnd = icmp ult i32 %base, 1000
+  br i1 %cnd, label %loads, label %exit
+
+loads:                                            ; preds = %entry
+  %sunkaddr = mul i64 %idx351.4, 4
+  %sunkaddr1 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr
+  %sunkaddr2 = getelementptr inbounds i8, i8* %sunkaddr1, i64 4096
+  %0 = bitcast i8* %sunkaddr2 to i32*
+  %tmp297.4 = load i32, i32* %0, align 4, !tbaa !0
+  %sunkaddr3 = mul i64 %idx351.5, 4
+  %sunkaddr4 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr3
+  %sunkaddr5 = getelementptr inbounds i8, i8* %sunkaddr4, i64 4096
+  %1 = bitcast i8* %sunkaddr5 to i32*
+  %tmp297.5 = load i32, i32* %1, align 4, !tbaa !0
+  br label %exit
+
+exit:                                             ; preds = %loads, %entry
+  ret void
+}
+
+attributes #0 = { convergent nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C++ TBAA"}

More information about the llvm-commits mailing list