[llvm] ff98efa - [RISCV][GISel] Enable shift_immed_chain in RISCVPostLegalizerCombiner

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 8 16:03:48 PST 2024


Author: Craig Topper
Date: 2024-11-08T16:02:23-08:00
New Revision: ff98efa329f3866ed7ddd461e9473729c2b91568

URL: https://github.com/llvm/llvm-project/commit/ff98efa329f3866ed7ddd461e9473729c2b91568
DIFF: https://github.com/llvm/llvm-project/commit/ff98efa329f3866ed7ddd461e9473729c2b91568.diff

LOG: [RISCV][GISel] Enable shift_immed_chain in RISCVPostLegalizerCombiner

This helps combine back to back shifts that may get created when
sext_inreg is legalized.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVCombine.td
    llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVCombine.td b/llvm/lib/Target/RISCV/RISCVCombine.td
index 69043e9d934df8..22a447c2649b29 100644
--- a/llvm/lib/Target/RISCV/RISCVCombine.td
+++ b/llvm/lib/Target/RISCV/RISCVCombine.td
@@ -24,6 +24,6 @@ def RISCVO0PreLegalizerCombiner: GICombiner<
 def RISCVPostLegalizerCombiner
     : GICombiner<"RISCVPostLegalizerCombinerImpl",
                  [sub_to_add, combines_for_extload, redundant_and,
-                  identity_combines, commute_constant_to_rhs,
+                  identity_combines, shift_immed_chain, commute_constant_to_rhs,
                   constant_fold_cast_op]> {
 }

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
index 32593a74d307ef..d8f20b29e2f064 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
@@ -17,8 +17,7 @@ define i8 @abs8(i8 %x) {
 ; RV32I-LABEL: abs8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a0, 24
-; RV32I-NEXT:    srai a1, a1, 24
-; RV32I-NEXT:    srai a1, a1, 7
+; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -33,8 +32,7 @@ define i8 @abs8(i8 %x) {
 ; RV64I-LABEL: abs8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a0, 24
-; RV64I-NEXT:    sraiw a1, a1, 24
-; RV64I-NEXT:    sraiw a1, a1, 7
+; RV64I-NEXT:    sraiw a1, a1, 31
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -53,8 +51,7 @@ define i16 @abs16(i16 %x) {
 ; RV32I-LABEL: abs16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a0, 16
-; RV32I-NEXT:    srai a1, a1, 16
-; RV32I-NEXT:    srai a1, a1, 15
+; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -69,8 +66,7 @@ define i16 @abs16(i16 %x) {
 ; RV64I-LABEL: abs16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a1, a0, 16
-; RV64I-NEXT:    sraiw a1, a1, 16
-; RV64I-NEXT:    sraiw a1, a1, 15
+; RV64I-NEXT:    sraiw a1, a1, 31
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
index 87c579da697ce4..c558639fda424e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
@@ -335,13 +335,11 @@ define i8 @srli_i8(i8 %a) nounwind {
 }
 
 ; FIXME: We should use slli+srai with Zbb for better compression.
-; FIXME: We should combine back to back srai.
 define i8 @srai_i8(i8 %a) nounwind {
 ; RV32I-LABEL: srai_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    srai a0, a0, 5
+; RV32I-NEXT:    srai a0, a0, 29
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: srai_i8:
@@ -353,8 +351,7 @@ define i8 @srai_i8(i8 %a) nounwind {
 ; RV32ZBKB-LABEL: srai_i8:
 ; RV32ZBKB:       # %bb.0:
 ; RV32ZBKB-NEXT:    slli a0, a0, 24
-; RV32ZBKB-NEXT:    srai a0, a0, 24
-; RV32ZBKB-NEXT:    srai a0, a0, 5
+; RV32ZBKB-NEXT:    srai a0, a0, 29
 ; RV32ZBKB-NEXT:    ret
   %1 = ashr i8 %a, 5
   ret i8 %1
@@ -380,13 +377,11 @@ define i16 @srli_i16(i16 %a) nounwind {
 }
 
 ; FIXME: We should use slli+srai with Zbb/Zbkb for better compression.
-; FIXME: We should combine back to back sraiw.
 define i16 @srai_i16(i16 %a) nounwind {
 ; RV32I-LABEL: srai_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srai a0, a0, 16
-; RV32I-NEXT:    srai a0, a0, 9
+; RV32I-NEXT:    srai a0, a0, 25
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: srai_i16:
@@ -398,8 +393,7 @@ define i16 @srai_i16(i16 %a) nounwind {
 ; RV32ZBKB-LABEL: srai_i16:
 ; RV32ZBKB:       # %bb.0:
 ; RV32ZBKB-NEXT:    slli a0, a0, 16
-; RV32ZBKB-NEXT:    srai a0, a0, 16
-; RV32ZBKB-NEXT:    srai a0, a0, 9
+; RV32ZBKB-NEXT:    srai a0, a0, 25
 ; RV32ZBKB-NEXT:    ret
   %1 = ashr i16 %a, 9
   ret i16 %1

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
index 0b6b2b2776a2a4..1184905c17edea 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb.ll
@@ -620,9 +620,9 @@ define i32 @sextb_i32(i32 %a) nounwind {
 define i64 @sextb_i64(i64 %a) nounwind {
 ; RV32I-LABEL: sextb_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 24
-; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    slli a1, a0, 24
+; RV32I-NEXT:    srai a0, a1, 24
+; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: sextb_i64:
@@ -655,9 +655,9 @@ define i32 @sexth_i32(i32 %a) nounwind {
 define i64 @sexth_i64(i64 %a) nounwind {
 ; RV32I-LABEL: sexth_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slli a0, a0, 16
-; RV32I-NEXT:    srai a0, a0, 16
-; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srai a0, a1, 16
+; RV32I-NEXT:    srai a1, a1, 31
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: sexth_i64:

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
index 7ce65771571f0c..a6d3ddbf199931 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb-zbkb.ll
@@ -439,13 +439,11 @@ define i8 @srli_i8(i8 %a) nounwind {
 }
 
 ; FIXME: We should use slli+srai with Zbb for better compression.
-; FIXME: We should combine back to back sraiw.
 define i8 @srai_i8(i8 %a) nounwind {
 ; RV64I-LABEL: srai_i8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    sraiw a0, a0, 24
-; RV64I-NEXT:    sraiw a0, a0, 5
+; RV64I-NEXT:    sraiw a0, a0, 29
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: srai_i8:
@@ -457,8 +455,7 @@ define i8 @srai_i8(i8 %a) nounwind {
 ; RV64ZBKB-LABEL: srai_i8:
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    slli a0, a0, 24
-; RV64ZBKB-NEXT:    sraiw a0, a0, 24
-; RV64ZBKB-NEXT:    sraiw a0, a0, 5
+; RV64ZBKB-NEXT:    sraiw a0, a0, 29
 ; RV64ZBKB-NEXT:    ret
   %1 = ashr i8 %a, 5
   ret i8 %1
@@ -492,13 +489,11 @@ define i16 @srli_i16(i16 %a) nounwind {
 }
 
 ; FIXME: We should use slli+srai with Zbb/Zbkb for better compression.
-; FIXME: We should combine back to back sraiw.
 define i16 @srai_i16(i16 %a) nounwind {
 ; RV64I-LABEL: srai_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 16
-; RV64I-NEXT:    sraiw a0, a0, 16
-; RV64I-NEXT:    sraiw a0, a0, 9
+; RV64I-NEXT:    sraiw a0, a0, 25
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: srai_i16:
@@ -510,8 +505,7 @@ define i16 @srai_i16(i16 %a) nounwind {
 ; RV64ZBKB-LABEL: srai_i16:
 ; RV64ZBKB:       # %bb.0:
 ; RV64ZBKB-NEXT:    slli a0, a0, 16
-; RV64ZBKB-NEXT:    sraiw a0, a0, 16
-; RV64ZBKB-NEXT:    sraiw a0, a0, 9
+; RV64ZBKB-NEXT:    sraiw a0, a0, 25
 ; RV64ZBKB-NEXT:    ret
   %1 = ashr i16 %a, 9
   ret i16 %1

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 89b30f733cc33a..1d0e38d2f91c44 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -270,17 +270,17 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-LABEL: ctlz_lshr_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a0, a0, 1
-; RV64I-NEXT:    slli a1, a0, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    beqz a1, .LBB4_2
+; RV64I-NEXT:    srliw a1, a0, 1
+; RV64I-NEXT:    slli a2, a1, 32
+; RV64I-NEXT:    srli a2, a2, 32
+; RV64I-NEXT:    beqz a2, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    .cfi_def_cfa_offset 16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    .cfi_offset ra, -8
-; RV64I-NEXT:    srliw a1, a0, 1
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    srliw a1, a0, 2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    srliw a1, a0, 4


        


More information about the llvm-commits mailing list