[llvm] c62cb1b - [RISCV][GISel] Stop promoting s32 G_ROTL/ROTR rotate amount to s64 on RV64.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 1 16:14:09 PDT 2024


Author: Craig Topper
Date: 2024-10-01T16:07:26-07:00
New Revision: c62cb1bf1e6dea19fa42584081bd17226ede2db5

URL: https://github.com/llvm/llvm-project/commit/c62cb1bf1e6dea19fa42584081bd17226ede2db5
DIFF: https://github.com/llvm/llvm-project/commit/c62cb1bf1e6dea19fa42584081bd17226ede2db5.diff

LOG: [RISCV][GISel] Stop promoting s32 G_ROTL/ROTR rotate amount to s64 on RV64.

There are no SelectionDAG patterns to share. GISel has its own patterns
since it considers s32 a legal type on RV64 and SDAG does not, so widening
the rotate amount to s64 no longer buys anything.
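
For context, a 32-bit rotate typically reaches this code path as a funnel
shift whose value operands match, which GlobalISel can combine into
G_ROTL/G_ROTR before legalization. A minimal end-to-end reproducer sketch
(the function name and llc invocation are illustrative, not taken from this
commit):

    ; llc -mtriple=riscv64 -mattr=+zbb -global-isel -stop-after=legalizer
    define i32 @rotl32(i32 %x, i32 %amt) {
      ; fshl with both value operands equal is a rotate-left.
      %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %amt)
      ret i32 %r
    }
    declare i32 @llvm.fshl.i32(i32, i32, i32)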

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
    llvm/lib/Target/RISCV/RISCVGISel.td
    llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
    llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 139ffcd27a0c17..e82d353953a5e1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -204,13 +204,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
 
   auto &RotateActions = getActionDefinitionsBuilder({G_ROTL, G_ROTR});
-  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
-    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
-    // Widen s32 rotate amount to s64 so SDAG patterns will match.
-    if (ST.is64Bit())
-      RotateActions.widenScalarIf(all(typeIs(0, s32), typeIs(1, s32)),
-                                  changeTo(1, sXLen));
-  }
+  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
+    RotateActions.legalFor({{s32, s32}, {sXLen, sXLen}});
   RotateActions.lower();
 
   getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

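The effect of dropping the widenScalarIf rule shows up directly in the MIR.
A rough before/after sketch for an {s32, s32} G_ROTL, distilled from the
test updates further down (the vreg names are mine):

    ; Before: the s32 amount was widened to sXLen, which the legalizer
    ; implemented as a zero-extension (mask with 0xffffffff):
    %mask:_(s64) = G_CONSTANT i64 4294967295
    %amt64:_(s64) = G_AND %amt, %mask
    %rot:_(s32) = G_ROTL %val, %amt64(s64)

    ; After: the amount stays s32 and no masking is needed:
    %amt32:_(s32) = G_TRUNC %amt(s64)
    %rot:_(s32) = G_ROTL %val, %amt32(s32)
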
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 84c00d31800cee..319611111cf470 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -169,6 +169,7 @@ def : StPat<store, SD, GPR, PtrVT>;
 //===----------------------------------------------------------------------===//
 
 def simm12i32 : ImmLeaf<i32, [{return isInt<12>(Imm);}]>;
+def uimm5i32 : ImmLeaf<i32, [{return isUInt<5>(Imm);}]>;
 
 def zext_is_sext : PatFrag<(ops node:$src), (zext node:$src), [{
   KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0), 0);
@@ -337,12 +338,13 @@ def : Pat<(i32 (and GPR:$rs1, (not GPR:$rs2))), (ANDN GPR:$rs1, GPR:$rs2)>;
 def : Pat<(i32 (or  GPR:$rs1, (not GPR:$rs2))), (ORN  GPR:$rs1, GPR:$rs2)>;
 def : Pat<(i32 (xor GPR:$rs1, (not GPR:$rs2))), (XNOR GPR:$rs1, GPR:$rs2)>;
 
-def : PatGprGpr<shiftopw<rotl>, ROLW, i32, i64>;
-def : PatGprGpr<shiftopw<rotr>, RORW, i32, i64>;
-def : PatGprImm<rotr, RORIW, uimm5, i32>;
+def : PatGprGpr<rotl, ROLW, i32, i32>;
+def : PatGprGpr<rotr, RORW, i32, i32>;
+def : Pat<(i32 (rotr GPR:$rs1, uimm5i32:$imm)),
+          (RORIW GPR:$rs1, (i64 (as_i64imm $imm)))>;
 
-def : Pat<(i32 (rotl GPR:$rs1, uimm5:$rs2)),
-          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
+def : Pat<(i32 (rotl GPR:$rs1, uimm5i32:$rs2)),
+          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5i32:$rs2))>;
 } // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
 
 let Predicates = [HasStdExtZbkb, IsRV64] in {

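As a sanity check on the immediate patterns above: rotating a 32-bit value
left by n is the same as rotating it right by (32 - n) mod 32, which is why
the rotl pattern routes its immediate through ImmSubFrom32 while the rotr
pattern feeds RORIW directly. Worked example:

    rotl(x, 15) = rotr(x, 32 - 15) = rotr(x, 17)  ->  RORIW x, 17
    rotr(x, 15)                                   ->  RORIW x, 15
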
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
index b9e9f36f766fb9..b75e926bb50c4e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rotate-rv64.mir
@@ -24,9 +24,8 @@ body:             |
     %0:gprb(s64) = COPY $x10
     %1:gprb(s32) = G_TRUNC %0(s64)
     %2:gprb(s64) = COPY $x11
-    %7:gprb(s64) = G_CONSTANT i64 4294967295
-    %6:gprb(s64) = G_AND %2, %7
-    %4:gprb(s32) = G_ROTL %1, %6(s64)
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ROTL %1, %6(s32)
     %5:gprb(s64) = G_ANYEXT %4(s32)
     $x10 = COPY %5(s64)
     PseudoRET implicit $x10
@@ -75,9 +74,8 @@ body:             |
     %0:gprb(s64) = COPY $x10
     %1:gprb(s32) = G_TRUNC %0(s64)
     %2:gprb(s64) = COPY $x11
-    %7:gprb(s64) = G_CONSTANT i64 4294967295
-    %6:gprb(s64) = G_AND %2, %7
-    %4:gprb(s32) = G_ROTR %1, %6(s64)
+    %6:gprb(s32) = G_TRUNC %2(s64)
+    %4:gprb(s32) = G_ROTR %1, %6(s32)
     %5:gprb(s64) = G_ANYEXT %4(s32)
     $x10 = COPY %5(s64)
     PseudoRET implicit $x10
@@ -119,13 +117,13 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: [[RORIW:%[0-9]+]]:gpr = RORIW [[COPY]], 17
     ; CHECK-NEXT: $x10 = COPY [[RORIW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gprb(s64) = COPY $x10
     %1:gprb(s32) = G_TRUNC %0(s64)
-    %2:gprb(s64) = G_CONSTANT i64 15
-    %3:gprb(s32) = G_ROTL %1, %2(s64)
+    %2:gprb(s32) = G_CONSTANT i32 15
+    %3:gprb(s32) = G_ROTL %1, %2(s32)
     %4:gprb(s64) = G_ANYEXT %3(s32)
     $x10 = COPY %4(s64)
     PseudoRET implicit $x10
@@ -167,13 +165,13 @@ body:             |
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: [[RORIW:%[0-9]+]]:gpr = RORIW [[COPY]], 15
    ; CHECK-NEXT: $x10 = COPY [[RORIW]]
     ; CHECK-NEXT: PseudoRET implicit $x10
     %0:gprb(s64) = COPY $x10
     %1:gprb(s32) = G_TRUNC %0(s64)
-    %2:gprb(s64) = G_CONSTANT i64 15
-    %3:gprb(s32) = G_ROTR %1, %2(s64)
+    %2:gprb(s32) = G_CONSTANT i32 15
+    %3:gprb(s32) = G_ROTR %1, %2(s32)
     %4:gprb(s64) = G_ANYEXT %3(s32)
     $x10 = COPY %4(s64)
     PseudoRET implicit $x10

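End to end, the variable-amount case should still select the W-form rotate;
for the rotl32 sketch near the top the expected RV64 Zbb assembly would be
(illustrative, not taken from the commit):

    rotl32:
        rolw a0, a0, a1
        ret

The selected instruction should be unchanged by this commit; what
disappears is the intermediate zero-extension of the rotate amount.
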
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
index b9d7b838c3b973..297671280954dd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
@@ -113,9 +113,8 @@ body:             |
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[TRUNC]], [[AND]](s64)
+    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[TRUNC]], [[TRUNC1]](s32)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ROTL]](s32)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10
@@ -273,9 +272,8 @@ body:             |
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
-    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[TRUNC]], [[AND]](s64)
+    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+    ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[TRUNC]], [[TRUNC1]](s32)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ROTR]](s32)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[ANYEXT]](s64)
     ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10

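For subtargets without Zbb/Zbkb, the RotateActions.lower() fallback from the
C++ hunk above still applies, expanding the rotate along the usual identity
(a sketch of the math, not the exact MIR the legalizer emits):

    rotl(x, n) = (x shl (n mod 32)) or (x lshr ((32 - n) mod 32))
    rotr(x, n) = (x lshr (n mod 32)) or (x shl ((32 - n) mod 32))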
