[llvm] 8911170 - [DAG] canCreateUndefOrPoison - add freeze(rot(x, y)) -> rot(freeze(x),freeze(y)) support

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 22 09:25:01 PDT 2022


Author: Simon Pilgrim
Date: 2022-10-22T17:24:53+01:00
New Revision: 89111707ec3abbd50197076c13b15b176656138a

URL: https://github.com/llvm/llvm-project/commit/89111707ec3abbd50197076c13b15b176656138a
DIFF: https://github.com/llvm/llvm-project/commit/89111707ec3abbd50197076c13b15b176656138a.diff

LOG: [DAG] canCreateUndefOrPoison - add freeze(rot(x,y)) -> rot(freeze(x),freeze(y)) support

The rotation amount is always interpreted modulo the bitwidth, so the rotate won't introduce poison/undef.
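
As a minimal illustration of why the fold is safe (a standalone C++ sketch, not LLVM code): the rotate amount is reduced modulo the bit width, so every possible amount yields a well-defined result and the node cannot generate undef/poison on its own.

    // Plain C++ sketch of rotate-left semantics; names are illustrative.
    #include <cstdint>

    uint32_t rotl32(uint32_t X, uint32_t Amt) {
      Amt &= 31;                        // amount is always taken modulo 32
      if (Amt == 0)
        return X;                       // avoid the out-of-range shift by 32
      return (X << Amt) | (X >> (32 - Amt));
    }

Because no amount is out of range, freeze can be pushed through the rotate onto its operands, which is what lets the back-to-back constant rotates in the tests below merge into a single rotate.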

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/X86/freeze-binary.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9f01fa53a98ed..95c309b02991a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4626,6 +4626,8 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:
+  case ISD::ROTL:
+  case ISD::ROTR:
   case ISD::BSWAP:
   case ISD::CTPOP:
   case ISD::BITREVERSE:

diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index ce81e6c7351a9..552cd3325747b 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -630,15 +630,13 @@ define i32 @freeze_rotl(i32 %a0) nounwind {
 ; X86-LABEL: freeze_rotl:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    roll $5, %eax
-; X86-NEXT:    roll $5, %eax
+; X86-NEXT:    roll $10, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_rotl:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    roll $5, %eax
-; X64-NEXT:    roll $5, %eax
+; X64-NEXT:    roll $10, %eax
 ; X64-NEXT:    retq
   %x = call i32 @llvm.fshl.i32(i32 %a0, i32 %a0, i32 5)
   %y = freeze i32 %x
@@ -650,35 +648,16 @@ declare i32 @llvm.fshl.i32(i32, i32, i32)
 define <4 x i32> @freeze_rotl_vec(<4 x i32> %a0) nounwind {
 ; X86-LABEL: freeze_rotl_vec:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    por %xmm2, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    por %xmm2, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $2, %xmm1
+; X86-NEXT:    pslld $30, %xmm0
+; X86-NEXT:    por %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_rotl_vec:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; X64-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; X64-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpsrld $2, %xmm0, %xmm1
+; X64-NEXT:    vpslld $30, %xmm0, %xmm0
 ; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %x = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a0, <4 x i32> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>)
@@ -692,15 +671,13 @@ define i32 @freeze_rotr(i32 %a0) nounwind {
 ; X86-LABEL: freeze_rotr:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    rorl $11, %eax
-; X86-NEXT:    rorl $13, %eax
+; X86-NEXT:    rorl $24, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_rotr:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    rorl $11, %eax
-; X64-NEXT:    rorl $13, %eax
+; X64-NEXT:    rorl $24, %eax
 ; X64-NEXT:    retq
   %x = call i32 @llvm.fshr.i32(i32 %a0, i32 %a0, i32 11)
   %y = freeze i32 %x
@@ -712,35 +689,16 @@ declare i32 @llvm.fshr.i32(i32, i32, i32)
 define <4 x i32> @freeze_rotr_vec(<4 x i32> %a0) nounwind {
 ; X86-LABEL: freeze_rotr_vec:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    por %xmm2, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    por %xmm2, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $31, %xmm1
+; X86-NEXT:    paddd %xmm0, %xmm0
+; X86-NEXT:    por %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: freeze_rotr_vec:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; X64-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; X64-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpsrld $31, %xmm0, %xmm1
+; X64-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %x = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a0, <4 x i32> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>)


More information about the llvm-commits mailing list