[llvm] [DAGCombiner][RISCV] CSE zext nneg and sext. (PR #82597)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 22 08:48:14 PST 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/82597

From 6d8822f933f1b852cd22b20dafc448e987b9f855 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 22 Feb 2024 00:00:15 -0800
Subject: [PATCH] [DAGCombiner][RISCV] CSE zext nneg and sext.

If we have a sext and a zext nneg with the same result type and
operand, we should combine them into the sext. We can't go the
other way because the nneg flag may only be valid in the context
of the uses of the zext nneg.
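
For illustration, a minimal IR sketch (hypothetical function and value
names, not part of this patch) of the pattern the combine targets: both
extends share the same operand, so the zext nneg can reuse the existing
sext node instead of emitting a second extend.

  ; Hypothetical example. With this combine the 'zext nneg' is CSE'd into
  ; the existing 'sext', leaving a single extend of %x in the DAG.
  define i64 @cse_zext_nneg_sext(i16 %x) {
    %s = sext i16 %x to i64
    %z = zext nneg i16 %x to i64
    %sum = add i64 %s, %z
    ret i64 %sum
  }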
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  7 ++
 llvm/test/CodeGen/RISCV/sext-zext-trunc.ll    | 69 +++++++------------
 2 files changed, 30 insertions(+), 46 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 89ef648ee7d7ed..ed43dd7f528821 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13997,6 +13997,13 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
   if (SDValue Res = tryToFoldExtendSelectLoad(N, TLI, DAG, Level))
     return Res;
 
+  // CSE zext nneg with sext if the zext is not free.
+  if (N->getFlags().hasNonNeg() && !TLI.isZExtFree(N0.getValueType(), VT)) {
+    SDNode *CSENode = DAG.getNodeIfExists(ISD::SIGN_EXTEND, N->getVTList(), N0);
+    if (CSENode)
+      return SDValue(CSENode, 0);
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index 09516d91771cad..87f2a6306bd60d 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -882,11 +882,10 @@ define void @load_zext_nneg_sext_cse(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lhu s0, 0(a0)
-; RV32I-NEXT:    slli a0, s0, 16
-; RV32I-NEXT:    bltz a0, .LBB50_2
+; RV32I-NEXT:    lh s0, 0(a0)
+; RV32I-NEXT:    bltz s0, .LBB50_2
 ; RV32I-NEXT:  # %bb.1: # %bb1
-; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call bar_i16
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -899,48 +898,26 @@ define void @load_zext_nneg_sext_cse(ptr %p) nounwind {
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
-; RV64I-LABEL: load_zext_nneg_sext_cse:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lhu s0, 0(a0)
-; RV64I-NEXT:    slli a0, s0, 48
-; RV64I-NEXT:    bltz a0, .LBB50_2
-; RV64I-NEXT:  # %bb.1: # %bb1
-; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    call bar_i16
-; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    tail bar_i32
-; RV64I-NEXT:  .LBB50_2: # %bb2
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-;
-; RV64ZBB-LABEL: load_zext_nneg_sext_cse:
-; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi sp, sp, -16
-; RV64ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ZBB-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64ZBB-NEXT:    lhu s0, 0(a0)
-; RV64ZBB-NEXT:    sext.h a0, s0
-; RV64ZBB-NEXT:    bltz a0, .LBB50_2
-; RV64ZBB-NEXT:  # %bb.1: # %bb1
-; RV64ZBB-NEXT:    call bar_i16
-; RV64ZBB-NEXT:    mv a0, s0
-; RV64ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64ZBB-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64ZBB-NEXT:    addi sp, sp, 16
-; RV64ZBB-NEXT:    tail bar_i32
-; RV64ZBB-NEXT:  .LBB50_2: # %bb2
-; RV64ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64ZBB-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64ZBB-NEXT:    addi sp, sp, 16
-; RV64ZBB-NEXT:    ret
+; RV64-LABEL: load_zext_nneg_sext_cse:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lh s0, 0(a0)
+; RV64-NEXT:    bltz s0, .LBB50_2
+; RV64-NEXT:  # %bb.1: # %bb1
+; RV64-NEXT:    mv a0, s0
+; RV64-NEXT:    call bar_i16
+; RV64-NEXT:    mv a0, s0
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    tail bar_i32
+; RV64-NEXT:  .LBB50_2: # %bb2
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
   %load = load i16, ptr %p
   %zext = zext nneg i16 %load to i32
   %cmp = icmp sgt i16 %load, -1
