[llvm] [InstCombine] fold `sub(zext(ptrtoint), zext(ptrtoint))` (PR #115369)

Nikolay Panchenko via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 8 14:25:06 PST 2024


https://github.com/npanchen updated https://github.com/llvm/llvm-project/pull/115369

From e360e21ee174ac7fb59a3e6df1bb345eae5ed11e Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <npanchen at modular.com>
Date: Thu, 7 Nov 2024 15:19:39 -0500
Subject: [PATCH 1/2] [InstCombine] fold `sub(zext(ptrtoint),zext(ptrtoint))`

On a 32-bit target, when pointer arithmetic feeds an i64 computation,
the missed fold in InstCombine leads to suboptimal code, unlike the
same code compiled for a 64-bit target.
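
For illustration, a minimal hypothetical reduction of the missed case
(not taken from the patch; the function name and datalayout are
assumptions): with 32-bit pointers, the pointer difference goes through
i32 ptrtoints that are widened before the i64 subtract, so the existing
bare-ptrtoint match never fires.

    target datalayout = "e-p:32:32"

    define i64 @widened_ptr_diff(ptr %base) {
      ; &base[4] - &base[0], computed in i32 and widened to i64.
      %end = getelementptr inbounds i8, ptr %base, i32 4
      %a = ptrtoint ptr %end to i32
      %b = ptrtoint ptr %base to i32
      %za = zext i32 %a to i64
      %zb = zext i32 %b to i64
      %d = sub i64 %za, %zb        ; zext(ptrtoint) - zext(ptrtoint)
      ret i64 %d
    }

With the m_ZExtOrSelf match, OptimizePointerDifference sees the two
underlying pointers, and %d should fold to a constant 4 here.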
---
 .../InstCombine/InstCombineAddSub.cpp         |  4 +-
 llvm/test/Transforms/InstCombine/sub-gep.ll   | 56 +++++++++++++++++++
 2 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 21588aca512758..d931f89b9af421 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2618,8 +2618,8 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // Optimize pointer differences into the same array into a size.  Consider:
   //  &A[10] - &A[0]: we should compile this to "10".
   Value *LHSOp, *RHSOp;
-  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
-      match(Op1, m_PtrToInt(m_Value(RHSOp))))
+  if (match(Op0, m_ZExtOrSelf(m_PtrToInt(m_Value(LHSOp)))) &&
+      match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp)))))
     if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                                I.hasNoUnsignedWrap()))
       return replaceInstUsesWith(I, Res);
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index b773d106b2c98a..ee18fd607823a5 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -270,6 +270,36 @@ define i64 @test25(ptr %P, i64 %A){
   ret i64 %G
 }
 
+define i64 @zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
+; CHECK-LABLE: @zext_ptrtoint_sub_ptrtoint(
+; CHECK-NEXT:  %1 = sext i32 %offset to i64
+; CHECK-NEXT:  %A = getelementptr bfloat, ptr @Arr, i64 %1
+; CHECK-NEXT:  %2 = ptrtoint ptr %A to i64
+; CHECK-NEXT:  %C = and i64 %2, 4294967294
+; CHECK-NEXT:  %D = sub i64 %C, ptrtoint (ptr @Arr to i64)
+; CHECK-NEXT:  ret i64 %D
+  %A = getelementptr bfloat, ptr @Arr, i32 %offset
+  %B = ptrtoint ptr %A to i32
+  %C = zext i32 %B to i64
+  %D = sub i64 %C, ptrtoint (ptr @Arr to i64)
+  ret i64 %D
+}
+
+define i64 @ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
+; CHECK-LABLE: @ptrtoint_sub_zext_ptrtoint(
+; CHECK-NEXT:  %1 = sext i32 %offset to i64
+; CHECK-NEXT:  %A = getelementptr bfloat, ptr @Arr, i64 %1
+; CHECK-NEXT:  %2 = ptrtoint ptr %A to i64
+; CHECK-NEXT:  %C = and i64 %2, 4294967294
+; CHECK-NEXT:  %D = sub i64 ptrtoint (ptr @Arr to i64), %C
+; CHECK-NEXT:  ret i64 %D
+  %A = getelementptr bfloat, ptr @Arr, i32 %offset
+  %B = ptrtoint ptr %A to i32
+  %C = zext i32 %B to i64
+  %D = sub i64 ptrtoint (ptr @Arr to i64), %C
+  ret i64 %D
+}
+
 @Arr_as1 = external addrspace(1) global [42 x i16]
 
 define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
@@ -285,6 +315,32 @@ define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
   ret i16 %G
 }
 
+define i64 @zext_ptrtoint_sub_ptrtoint_as1(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABLE: @zext_ptrtoint_sub_ptrtoint_as1(
+; CHECK-NEXT:  %1 = trunc i32 %offset to i16
+; CHECK-NEXT:  %A.idx = shl i16 %1, 1
+; CHECK-NEXT:  %D = sext i16 %A.idx to i64
+; CHECK-NEXT:  ret i64 %D
+  %A = getelementptr bfloat, ptr addrspace(1) @Arr_as1, i32 %offset
+  %B = ptrtoint ptr addrspace(1) %A to i32
+  %C = zext i32 %B to i64
+  %D = sub i64 %C, ptrtoint (ptr addrspace(1) @Arr_as1 to i64)
+  ret i64 %D
+}
+
+define i64 @ptrtoint_sub_zext_ptrtoint_as1(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABLE: @ptrtoint_sub_zext_ptrtoint_as1(
+; CHECK-NEXT:  %1 = trunc i32 %offset to i16
+; CHECK-NEXT:  %A.idx.neg = mul i16 %1, -2
+; CHECK-NEXT:  %D = sext i16 %A.idx.neg to i64
+; CHECK-NEXT:  ret i64 %D
+  %A = getelementptr bfloat, ptr addrspace(1) @Arr_as1, i32 %offset
+  %B = ptrtoint ptr addrspace(1) %A to i32
+  %C = zext i32 %B to i64
+  %D = sub i64 ptrtoint (ptr addrspace(1) @Arr_as1 to i64), %C
+  ret i64 %D
+}
+
 define i64 @test30(ptr %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test30(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2

From 5c692fdb697e70ec7df72709910f6a5acd26cdd3 Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <npanchen at modular.com>
Date: Fri, 8 Nov 2024 17:24:30 -0500
Subject: [PATCH 2/2] Restrict the fold to an inbounds GEP off the subtracted global

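To make the narrower pattern concrete, a minimal sketch of the shape the
new code handles, mirroring the added as2 test (the global and function
names here are hypothetical):

    target datalayout = "e-p2:32:32"

    @G = external addrspace(2) global [42 x i16]

    define i64 @fold_to_offset(i32 %i) {
      ; zext(ptrtoint(gep inbounds @G, %i)) - ptrtoint(@G) reduces to a
      ; zext of the GEP's byte offset, i.e. zext(%i << 1) for i16 elements.
      %p = getelementptr inbounds i16, ptr addrspace(2) @G, i32 %i
      %a = ptrtoint ptr addrspace(2) %p to i32
      %z = zext i32 %a to i64
      %d = sub i64 %z, ptrtoint (ptr addrspace(2) @G to i64)
      ret i64 %d
    }

Only this operand order is handled; the reversed subtraction (base minus
zext), as in ptrtoint_sub_zext_ptrtoint_as2, is left as-is.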
---
 .../InstCombine/InstCombineAddSub.cpp         | 16 +++++++-
 llvm/test/Transforms/InstCombine/sub-gep.ll   | 39 ++++++++++---------
 2 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index d931f89b9af421..4f14b1eb57785c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2618,8 +2618,8 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   // Optimize pointer differences into the same array into a size.  Consider:
   //  &A[10] - &A[0]: we should compile this to "10".
   Value *LHSOp, *RHSOp;
-  if (match(Op0, m_ZExtOrSelf(m_PtrToInt(m_Value(LHSOp)))) &&
-      match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp)))))
+  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
+      match(Op1, m_PtrToInt(m_Value(RHSOp))))
     if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                                I.hasNoUnsignedWrap()))
       return replaceInstUsesWith(I, Res);
@@ -2631,6 +2631,18 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
                                                /* IsNUW */ false))
       return replaceInstUsesWith(I, Res);
 
+  if (match(Op0, m_ZExt(m_PtrToInt(m_Value(LHSOp)))) &&
+      match(Op1, m_PtrToInt(m_Value(RHSOp))) && isa<GlobalValue>(RHSOp)) {
+    Value *Offset;
+    if (match(LHSOp, m_GEP(m_Specific(RHSOp), m_Value(Offset)))) {
+      auto *GEP = cast<GEPOperator>(LHSOp);
+      if (GEP->isInBounds()) {
+        Value *Res = Builder.CreateZExt(EmitGEPOffset(GEP), I.getType());
+        return replaceInstUsesWith(I, Res);
+      }
+    }
+  }
+
   // Canonicalize a shifty way to code absolute value to the common pattern.
   // There are 2 potential commuted variants.
   // We're relying on the fact that we only do this transform when the shift has
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index ee18fd607823a5..e07539f4d20f32 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -passes=instcombine < %s | FileCheck %s
 
-target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-p2:32:32"
 
 define i64 @test_inbounds(ptr %base, i64 %idx) {
 ; CHECK-LABEL: @test_inbounds(
@@ -271,7 +271,7 @@ define i64 @test25(ptr %P, i64 %A){
 }
 
 define i64 @zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
-; CHECK-LABLE: @zext_ptrtoint_sub_ptrtoint(
+; CHECK-LABEL: @zext_ptrtoint_sub_ptrtoint(
 ; CHECK-NEXT:  %1 = sext i32 %offset to i64
 ; CHECK-NEXT:  %A = getelementptr bfloat, ptr @Arr, i64 %1
 ; CHECK-NEXT:  %2 = ptrtoint ptr %A to i64
@@ -286,7 +286,7 @@ define i64 @zext_ptrtoint_sub_ptrtoint(ptr %p, i32 %offset) {
 }
 
 define i64 @ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
-; CHECK-LABLE: @ptrtoint_sub_zext_ptrtoint(
+; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint(
 ; CHECK-NEXT:  %1 = sext i32 %offset to i64
 ; CHECK-NEXT:  %A = getelementptr bfloat, ptr @Arr, i64 %1
 ; CHECK-NEXT:  %2 = ptrtoint ptr %A to i64
@@ -315,29 +315,30 @@ define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
   ret i16 %G
 }
 
-define i64 @zext_ptrtoint_sub_ptrtoint_as1(ptr addrspace(1) %p, i32 %offset) {
-; CHECK-LABLE: @zext_ptrtoint_sub_ptrtoint_as1(
-; CHECK-NEXT:  %1 = trunc i32 %offset to i16
-; CHECK-NEXT:  %A.idx = shl i16 %1, 1
-; CHECK-NEXT:  %D = sext i16 %A.idx to i64
+@Arr_as2 = external addrspace(2) global [42 x i16]
+define i64 @zext_ptrtoint_sub_ptrtoint_as2(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: @zext_ptrtoint_sub_ptrtoint_as2(
+; CHECK-NEXT:  %A.idx = shl nsw i32 %offset, 1
+; CHECK-NEXT:  %D = zext i32 %A.idx to i64
 ; CHECK-NEXT:  ret i64 %D
-  %A = getelementptr bfloat, ptr addrspace(1) @Arr_as1, i32 %offset
-  %B = ptrtoint ptr addrspace(1) %A to i32
+  %A = getelementptr inbounds bfloat, ptr addrspace(2) @Arr_as2, i32 %offset
+  %B = ptrtoint ptr addrspace(2) %A to i32
   %C = zext i32 %B to i64
-  %D = sub i64 %C, ptrtoint (ptr addrspace(1) @Arr_as1 to i64)
+  %D = sub i64 %C, ptrtoint (ptr addrspace(2) @Arr_as2 to i64)
   ret i64 %D
 }
 
-define i64 @ptrtoint_sub_zext_ptrtoint_as1(ptr addrspace(1) %p, i32 %offset) {
-; CHECK-LABLE: @ptrtoint_sub_zext_ptrtoint_as1(
-; CHECK-NEXT:  %1 = trunc i32 %offset to i16
-; CHECK-NEXT:  %A.idx.neg = mul i16 %1, -2
-; CHECK-NEXT:  %D = sext i16 %A.idx.neg to i64
+define i64 @ptrtoint_sub_zext_ptrtoint_as2(ptr addrspace(2) %p, i32 %offset) {
+; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint_as2(
+; CHECK-NEXT:  %A = getelementptr inbounds bfloat, ptr addrspace(2) @Arr_as2, i32 %offset
+; CHECK-NEXT:  %B = ptrtoint ptr addrspace(2) %A to i32
+; CHECK-NEXT:  %C = zext i32 %B to i64
+; CHECK-NEXT:  %D = sub nsw i64 ptrtoint (ptr addrspace(2) @Arr_as2 to i64), %C
 ; CHECK-NEXT:  ret i64 %D
-  %A = getelementptr bfloat, ptr addrspace(1) @Arr_as1, i32 %offset
-  %B = ptrtoint ptr addrspace(1) %A to i32
+  %A = getelementptr inbounds bfloat, ptr addrspace(2) @Arr_as2, i32 %offset
+  %B = ptrtoint ptr addrspace(2) %A to i32
   %C = zext i32 %B to i64
-  %D = sub i64 ptrtoint (ptr addrspace(1) @Arr_as1 to i64), %C
+  %D = sub i64 ptrtoint (ptr addrspace(2) @Arr_as2 to i64), %C
   ret i64 %D
 }
 


