[llvm] [DAG] SelectionDAG.computeKnownBits - add NSW/NUW flags support to ISD::SHL handling (PR #89877)

via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 28 21:33:56 PDT 2024


https://github.com/zxc12523 updated https://github.com/llvm/llvm-project/pull/89877

>From ea74bf2ac356923965211188f86a46075d736bb9 Mon Sep 17 00:00:00 2001
From: zxc12523 <danzxc910624 at gmail.com>
Date: Wed, 24 Apr 2024 13:57:23 +0800
Subject: [PATCH 1/4] feat: add NSW & NUW & ShAmtNonZero

---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0ab5142ab81676..2e1d225052c7de 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3475,16 +3475,23 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
       Known.Zero.setBitsFrom(1);
     break;
   }
-  case ISD::SHL:
+  case ISD::SHL: {
     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
-    Known = KnownBits::shl(Known, Known2);
+
+    bool NUW = Op->getFlags().hasNoUnsignedWrap();
+    bool NSW = Op->getFlags().hasNoSignedWrap();
+    
+    bool ShAmtNonZero = Known2.isNonZero();
+
+    Known = KnownBits::shl(Known, Known2, NUW, NSW, ShAmtNonZero);
 
     // Minimum shift low bits are known zero.
     if (const APInt *ShMinAmt =
             getValidMinimumShiftAmountConstant(Op, DemandedElts))
       Known.Zero.setLowBits(ShMinAmt->getZExtValue());
     break;
+  }
   case ISD::SRL:
     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
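
For reference, the extra precision the flag-aware KnownBits::shl overload can
recover: when the shifted value has a known high bit, NUW rules out every
shift amount that would push that bit out. A minimal standalone illustration
(an assumption-level sketch against the KnownBits::shl overload with
NUW/NSW/ShAmtNonZero parameters that ValueTracking already uses; not part of
the patch itself):

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/KnownBits.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      // i8 value known to be exactly 0x80 (top bit set); shift amount unknown.
      KnownBits LHS = KnownBits::makeConstant(APInt(8, 0x80));
      KnownBits ShAmt(8);

      // Without flags, any shift amount >= 1 may discard the set bit, so
      // only the low 7 bits can be proven zero.
      KnownBits Plain = KnownBits::shl(LHS, ShAmt);

      // With NUW, shifting the set bit out would be an unsigned wrap, so the
      // only admissible shift amount is 0: the result should stay 0x80.
      KnownBits WithNUW = KnownBits::shl(LHS, ShAmt, /*NUW=*/true, /*NSW=*/false);

      Plain.print(outs());   // expected: low 7 bits known zero, bit 7 unknown
      outs() << '\n';
      WithNUW.print(outs()); // expected: fully known constant 0x80
      outs() << '\n';
      return 0;
    }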

>From 1b5d17771995ffda95d3351d791646027f25b9f2 Mon Sep 17 00:00:00 2001
From: zxc12523 <danzxc910624 at gmail.com>
Date: Fri, 26 Apr 2024 18:37:20 +0800
Subject: [PATCH 2/4] Update codegen checks in fold-int-pow2-with-fmul-or-fdiv.ll

---
 .../X86/fold-int-pow2-with-fmul-or-fdiv.ll    | 111 ++++++------------
 1 file changed, 39 insertions(+), 72 deletions(-)

diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 8f875c70a25f6d..4ee479bbfbbdf4 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -840,88 +840,55 @@ define double @fmul_pow_shl_cnt_fail_maybe_non_pow2(i64 %v, i64 %cnt) nounwind {
 define <2 x float> @fmul_pow_shl_cnt_vec_fail_expensive_cast(<2 x i64> %cnt) nounwind {
 ; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2]
-; CHECK-SSE-NEXT:    movdqa %xmm3, %xmm1
-; CHECK-SSE-NEXT:    psllq %xmm2, %xmm1
-; CHECK-SSE-NEXT:    psllq %xmm0, %xmm3
-; CHECK-SSE-NEXT:    movq %xmm3, %rax
-; CHECK-SSE-NEXT:    testq %rax, %rax
-; CHECK-SSE-NEXT:    js .LBB12_1
-; CHECK-SSE-NEXT:  # %bb.2:
-; CHECK-SSE-NEXT:    xorps %xmm0, %xmm0
-; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm0
-; CHECK-SSE-NEXT:    jmp .LBB12_3
-; CHECK-SSE-NEXT:  .LBB12_1:
-; CHECK-SSE-NEXT:    movq %rax, %rcx
-; CHECK-SSE-NEXT:    shrq %rcx
-; CHECK-SSE-NEXT:    andl $1, %eax
-; CHECK-SSE-NEXT:    orq %rcx, %rax
-; CHECK-SSE-NEXT:    xorps %xmm0, %xmm0
-; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm0
-; CHECK-SSE-NEXT:    addss %xmm0, %xmm0
-; CHECK-SSE-NEXT:  .LBB12_3:
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; CHECK-SSE-NEXT:    movq %xmm1, %rax
-; CHECK-SSE-NEXT:    testq %rax, %rax
-; CHECK-SSE-NEXT:    js .LBB12_4
-; CHECK-SSE-NEXT:  # %bb.5:
-; CHECK-SSE-NEXT:    xorps %xmm1, %xmm1
-; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm1
-; CHECK-SSE-NEXT:    jmp .LBB12_6
-; CHECK-SSE-NEXT:  .LBB12_4:
-; CHECK-SSE-NEXT:    movq %rax, %rcx
-; CHECK-SSE-NEXT:    shrq %rcx
-; CHECK-SSE-NEXT:    andl $1, %eax
-; CHECK-SSE-NEXT:    orq %rcx, %rax
-; CHECK-SSE-NEXT:    xorps %xmm1, %xmm1
-; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm1
-; CHECK-SSE-NEXT:    addss %xmm1, %xmm1
-; CHECK-SSE-NEXT:  .LBB12_6:
-; CHECK-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2,2]
+; CHECK-SSE-NEXT:    movdqa	%xmm2, %xmm3
+; CHECK-SSE-NEXT:    psllq	%xmm1, %xmm3
+; CHECK-SSE-NEXT:    psllq	%xmm0, %xmm2
+; CHECK-SSE-NEXT:    movq	%xmm2, %rax
+; CHECK-SSE-NEXT:    xorps	%xmm0, %xmm0
+; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm0
+; CHECK-SSE-NEXT:    pshufd	$238, %xmm3, %xmm1              # xmm1 = xmm3[2,3,2,3]
+; CHECK-SSE-NEXT:    movq	%xmm1, %rax
+; CHECK-SSE-NEXT:    xorps	%xmm1, %xmm1
+; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm1
+; CHECK-SSE-NEXT:    unpcklps	%xmm1, %xmm0                    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [2,2]
-; CHECK-AVX2-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
-; CHECK-AVX2-NEXT:    vpsrlq $1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT:    vblendvpd %xmm0, %xmm1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; CHECK-AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
-; CHECK-AVX2-NEXT:    vmovq %xmm1, %rax
-; CHECK-AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
-; CHECK-AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
-; CHECK-AVX2-NEXT:    vaddps %xmm1, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm0
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; CHECK-AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
-; CHECK-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-AVX2-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    retq
+; CHECK-AVX2-NEXT:	vpmovsxbq {{.*#+}} xmm1 = [2,2]
+; CHECK-AVX2-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:	vpextrq	$1, %xmm0, %rax
+; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
+; CHECK-AVX2-NEXT:	vmovq	%xmm0, %rax
+; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
+; CHECK-AVX2-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; CHECK-AVX2-NEXT:	vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-AVX2-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:	retq
 ;
 ; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-NO-FASTFMA:       # %bb.0:
-; CHECK-NO-FASTFMA-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [2,2]
-; CHECK-NO-FASTFMA-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    vpextrq $1, %xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
-; CHECK-NO-FASTFMA-NEXT:    vmovq %xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; CHECK-NO-FASTFMA-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-NO-FASTFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    retq
+; CHECK-NO-FASTFMA-NEXT:	vpmovsxbq	{{.*#+}} xmm1 = [2,2]
+; CHECK-NO-FASTFMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	vpextrq	$1, %xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
+; CHECK-NO-FASTFMA-NEXT:	vmovq	%xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; CHECK-NO-FASTFMA-NEXT:	vbroadcastss	{{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-NO-FASTFMA-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	retq
 ;
 ; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-FMA:       # %bb.0:
-; CHECK-FMA-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
-; CHECK-FMA-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
-; CHECK-FMA-NEXT:    vcvtuqq2ps %xmm0, %xmm0
-; CHECK-FMA-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK-FMA-NEXT:    retq
+; CHECK-FMA-NEXT:	vpbroadcastq	{{.*#+}} xmm1 = [2,2]
+; CHECK-FMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-FMA-NEXT:	vcvtqq2ps	%xmm0, %xmm0
+; CHECK-FMA-NEXT:	vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-FMA-NEXT:	retq
   %shl = shl nsw nuw <2 x i64> <i64 2, i64 2>, %cnt
   %conv = uitofp <2 x i64> %shl to <2 x float>
   %mul = fmul <2 x float> <float 15.000000e+00, float 15.000000e+00>, %conv
@@ -986,7 +953,7 @@ define <4 x float> @fmul_pow_shl_cnt_vec_preserve_fma(<4 x i32> %cnt, <4 x float
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
 ; CHECK-FMA-NEXT:    vpsllvd %xmm0, %xmm2, %xmm0
-; CHECK-FMA-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; CHECK-FMA-NEXT:	   vcvtdq2ps	%xmm0, %xmm0
 ; CHECK-FMA-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
 ; CHECK-FMA-NEXT:    retq
   %shl = shl nsw nuw <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %cnt
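
Why the regenerated checks above lose the sign-handling code: with the
nuw/nsw flags now visible to computeKnownBits, the sign bit of the shifted
value is provably zero, so the existing uint_to_fp -> sint_to_fp combine can
fire. That is why the SSE version drops the testq/js expansion and the
AVX-512 variants switch from vcvtusi2ss/vcvtuqq2ps/vcvtudq2ps to their signed
forms. A simplified sketch of that fold (condensed from
DAGCombiner::visitUINT_TO_FP, which additionally checks operation legality):

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // If the operand's sign bit is known zero, an unsigned int-to-fp
    // conversion is equivalent to the cheaper signed one.
    static SDValue foldUIntToFP(SelectionDAG &DAG, SDValue N0, const SDLoc &DL,
                                EVT VT) {
      if (DAG.SignBitIsZero(N0))
        return DAG.getNode(ISD::SINT_TO_FP, DL, VT, N0);
      return SDValue(); // no simplification
    }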

>From 8578c44702b368d06ab9379fee2bf12185312ee4 Mon Sep 17 00:00:00 2001
From: zxc12523 <danzxc910624 at gmail.com>
Date: Fri, 26 Apr 2024 19:12:50 +0800
Subject: [PATCH 3/4] create codegen test: pr89877.ll

---
 .../X86/fold-int-pow2-with-fmul-or-fdiv.ll    | 113 ++++++++++------
 llvm/test/CodeGen/X86/pr89877.ll              | 126 ++++++++++++++++++
 2 files changed, 199 insertions(+), 40 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/pr89877.ll

diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 4ee479bbfbbdf4..dadcc68c5dcf43 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -840,55 +840,88 @@ define double @fmul_pow_shl_cnt_fail_maybe_non_pow2(i64 %v, i64 %cnt) nounwind {
 define <2 x float> @fmul_pow_shl_cnt_vec_fail_expensive_cast(<2 x i64> %cnt) nounwind {
 ; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2,2]
-; CHECK-SSE-NEXT:    movdqa	%xmm2, %xmm3
-; CHECK-SSE-NEXT:    psllq	%xmm1, %xmm3
-; CHECK-SSE-NEXT:    psllq	%xmm0, %xmm2
-; CHECK-SSE-NEXT:    movq	%xmm2, %rax
-; CHECK-SSE-NEXT:    xorps	%xmm0, %xmm0
-; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm0
-; CHECK-SSE-NEXT:    pshufd	$238, %xmm3, %xmm1              # xmm1 = xmm3[2,3,2,3]
-; CHECK-SSE-NEXT:    movq	%xmm1, %rax
-; CHECK-SSE-NEXT:    xorps	%xmm1, %xmm1
-; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm1
-; CHECK-SSE-NEXT:    unpcklps	%xmm1, %xmm0                    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2]
+; CHECK-SSE-NEXT:    movdqa %xmm3, %xmm1
+; CHECK-SSE-NEXT:    psllq %xmm2, %xmm1
+; CHECK-SSE-NEXT:    psllq %xmm0, %xmm3
+; CHECK-SSE-NEXT:    movq %xmm3, %rax
+; CHECK-SSE-NEXT:    testq %rax, %rax
+; CHECK-SSE-NEXT:    js .LBB12_1
+; CHECK-SSE-NEXT:  # %bb.2:
+; CHECK-SSE-NEXT:    xorps %xmm0, %xmm0
+; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm0
+; CHECK-SSE-NEXT:    jmp .LBB12_3
+; CHECK-SSE-NEXT:  .LBB12_1:
+; CHECK-SSE-NEXT:    movq %rax, %rcx
+; CHECK-SSE-NEXT:    shrq %rcx
+; CHECK-SSE-NEXT:    andl $1, %eax
+; CHECK-SSE-NEXT:    orq %rcx, %rax
+; CHECK-SSE-NEXT:    xorps %xmm0, %xmm0
+; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm0
+; CHECK-SSE-NEXT:    addss %xmm0, %xmm0
+; CHECK-SSE-NEXT:  .LBB12_3:
+; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; CHECK-SSE-NEXT:    movq %xmm1, %rax
+; CHECK-SSE-NEXT:    testq %rax, %rax
+; CHECK-SSE-NEXT:    js .LBB12_4
+; CHECK-SSE-NEXT:  # %bb.5:
+; CHECK-SSE-NEXT:    xorps %xmm1, %xmm1
+; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm1
+; CHECK-SSE-NEXT:    jmp .LBB12_6
+; CHECK-SSE-NEXT:  .LBB12_4:
+; CHECK-SSE-NEXT:    movq %rax, %rcx
+; CHECK-SSE-NEXT:    shrq %rcx
+; CHECK-SSE-NEXT:    andl $1, %eax
+; CHECK-SSE-NEXT:    orq %rcx, %rax
+; CHECK-SSE-NEXT:    xorps %xmm1, %xmm1
+; CHECK-SSE-NEXT:    cvtsi2ss %rax, %xmm1
+; CHECK-SSE-NEXT:    addss %xmm1, %xmm1
+; CHECK-SSE-NEXT:  .LBB12_6:
+; CHECK-SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:	vpmovsxbq {{.*#+}} xmm1 = [2,2]
-; CHECK-AVX2-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-AVX2-NEXT:	vpextrq	$1, %xmm0, %rax
-; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
-; CHECK-AVX2-NEXT:	vmovq	%xmm0, %rax
-; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
-; CHECK-AVX2-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; CHECK-AVX2-NEXT:	vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-AVX2-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:	retq
+; CHECK-AVX2-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [2,2]
+; CHECK-AVX2-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vpsrlq $1, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vblendvpd %xmm0, %xmm1, %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpextrq $1, %xmm1, %rax
+; CHECK-AVX2-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm2
+; CHECK-AVX2-NEXT:    vmovq %xmm1, %rax
+; CHECK-AVX2-NEXT:    vcvtsi2ss %rax, %xmm3, %xmm1
+; CHECK-AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
+; CHECK-AVX2-NEXT:    vaddps %xmm1, %xmm1, %xmm2
+; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-AVX2-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm0
+; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; CHECK-AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-AVX2-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-NO-FASTFMA:       # %bb.0:
-; CHECK-NO-FASTFMA-NEXT:	vpmovsxbq	{{.*#+}} xmm1 = [2,2]
-; CHECK-NO-FASTFMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	vpextrq	$1, %xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
-; CHECK-NO-FASTFMA-NEXT:	vmovq	%xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; CHECK-NO-FASTFMA-NEXT:	vbroadcastss	{{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-NO-FASTFMA-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	retq
+; CHECK-NO-FASTFMA-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [2,2]
+; CHECK-NO-FASTFMA-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    vpextrq $1, %xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm1
+; CHECK-NO-FASTFMA-NEXT:    vmovq %xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; CHECK-NO-FASTFMA-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-NO-FASTFMA-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    retq
 ;
 ; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
 ; CHECK-FMA:       # %bb.0:
-; CHECK-FMA-NEXT:	vpbroadcastq	{{.*#+}} xmm1 = [2,2]
-; CHECK-FMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-FMA-NEXT:	vcvtqq2ps	%xmm0, %xmm0
-; CHECK-FMA-NEXT:	vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK-FMA-NEXT:	retq
+; CHECK-FMA-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
+; CHECK-FMA-NEXT:    vpsllvq %xmm0, %xmm1, %xmm0
+; CHECK-FMA-NEXT:    vcvtuqq2ps %xmm0, %xmm0
+; CHECK-FMA-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-FMA-NEXT:    retq
   %shl = shl nsw nuw <2 x i64> <i64 2, i64 2>, %cnt
   %conv = uitofp <2 x i64> %shl to <2 x float>
   %mul = fmul <2 x float> <float 15.000000e+00, float 15.000000e+00>, %conv
@@ -953,7 +986,7 @@ define <4 x float> @fmul_pow_shl_cnt_vec_preserve_fma(<4 x i32> %cnt, <4 x float
 ; CHECK-FMA:       # %bb.0:
 ; CHECK-FMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
 ; CHECK-FMA-NEXT:    vpsllvd %xmm0, %xmm2, %xmm0
-; CHECK-FMA-NEXT:	   vcvtdq2ps	%xmm0, %xmm0
+; CHECK-FMA-NEXT:    vcvtudq2ps %xmm0, %xmm0
 ; CHECK-FMA-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
 ; CHECK-FMA-NEXT:    retq
   %shl = shl nsw nuw <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %cnt
@@ -1635,4 +1668,4 @@ define float @fdiv_pow_shl_cnt32_okay(i32 %cnt) nounwind {
   %conv = uitofp i32 %shl to float
   %mul = fdiv float 0x3a20000000000000, %conv
   ret float %mul
-}
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/X86/pr89877.ll b/llvm/test/CodeGen/X86/pr89877.ll
new file mode 100644
index 00000000000000..1882c4a665da80
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr89877.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK-NO-FASTFMA
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -fp-contract=fast | FileCheck %s --check-prefixes=CHECK-FMA
+
+define i32 @sext_known_nonzero(i16 %xx) {
+; X86-LABEL: sext_known_nonzero:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $256, %eax # imm = 0x100
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    movzwl	%ax, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movl $256, %eax # imm = 0x100
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    movzwl	%ax, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
+; X64-NEXT:    retq
+  %x = shl nuw nsw i16 256, %xx
+  %z = sext i16 %x to i32
+  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+  ret i32 %r
+}
+
+define <2 x float> @fmul_pow_shl_cnt_vec_fail_expensive_cast(<2 x i64> %cnt) nounwind {
+; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
+; CHECK-SSE:       # %bb.0:
+; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2,2]
+; CHECK-SSE-NEXT:    movdqa	%xmm2, %xmm3
+; CHECK-SSE-NEXT:    psllq	%xmm1, %xmm3
+; CHECK-SSE-NEXT:    psllq	%xmm0, %xmm2
+; CHECK-SSE-NEXT:    movq	%xmm2, %rax
+; CHECK-SSE-NEXT:    xorps	%xmm0, %xmm0
+; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm0
+; CHECK-SSE-NEXT:    pshufd	$238, %xmm3, %xmm1              # xmm1 = xmm3[2,3,2,3]
+; CHECK-SSE-NEXT:    movq	%xmm1, %rax
+; CHECK-SSE-NEXT:    xorps	%xmm1, %xmm1
+; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm1
+; CHECK-SSE-NEXT:    unpcklps	%xmm1, %xmm0                    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+;
+; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:	vpmovsxbq {{.*#+}} xmm1 = [2,2]
+; CHECK-AVX2-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-AVX2-NEXT:	vpextrq	$1, %xmm0, %rax
+; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
+; CHECK-AVX2-NEXT:	vmovq	%xmm0, %rax
+; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
+; CHECK-AVX2-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; CHECK-AVX2-NEXT:	vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-AVX2-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:	retq
+;
+; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
+; CHECK-NO-FASTFMA:       # %bb.0:
+; CHECK-NO-FASTFMA-NEXT:	vpmovsxbq	{{.*#+}} xmm1 = [2,2]
+; CHECK-NO-FASTFMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	vpextrq	$1, %xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
+; CHECK-NO-FASTFMA-NEXT:	vmovq	%xmm0, %rax
+; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; CHECK-NO-FASTFMA-NEXT:	vbroadcastss	{{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
+; CHECK-NO-FASTFMA-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:	retq
+;
+; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
+; CHECK-FMA:       # %bb.0:
+; CHECK-FMA-NEXT:	vpbroadcastq	{{.*#+}} xmm1 = [2,2]
+; CHECK-FMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
+; CHECK-FMA-NEXT:	vcvtqq2ps	%xmm0, %xmm0
+; CHECK-FMA-NEXT:	vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-FMA-NEXT:	retq
+  %shl = shl nsw nuw <2 x i64> <i64 2, i64 2>, %cnt
+  %conv = uitofp <2 x i64> %shl to <2 x float>
+  %mul = fmul <2 x float> <float 15.000000e+00, float 15.000000e+00>, %conv
+  ret <2 x float> %mul
+}
+
+define <4 x float> @fmul_pow_shl_cnt_vec_preserve_fma(<4 x i32> %cnt, <4 x float> %add) nounwind {
+; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
+; CHECK-SSE:       # %bb.0:
+; CHECK-SSE-NEXT:    pslld $23, %xmm0
+; CHECK-SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE-NEXT:    addps %xmm1, %xmm0
+; CHECK-SSE-NEXT:    retq
+;
+; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpslld $23, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1092616192,1092616192,1092616192,1092616192]
+; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    retq
+;
+; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
+; CHECK-NO-FASTFMA:       # %bb.0:
+; CHECK-NO-FASTFMA-NEXT:    vpslld $23, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1092616192,1092616192,1092616192,1092616192]
+; CHECK-NO-FASTFMA-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NO-FASTFMA-NEXT:    retq
+;
+; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
+; CHECK-FMA:       # %bb.0:
+; CHECK-FMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
+; CHECK-FMA-NEXT:    vpsllvd %xmm0, %xmm2, %xmm0
+; CHECK-FMA-NEXT:	   vcvtdq2ps	%xmm0, %xmm0
+; CHECK-FMA-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
+; CHECK-FMA-NEXT:    retq
+  %shl = shl nsw nuw <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %cnt
+  %conv = uitofp <4 x i32> %shl to <4 x float>
+  %mul = fmul <4 x float> <float 5.000000e+00, float 5.000000e+00, float 5.000000e+00, float 5.000000e+00>, %conv
+  %res = fadd <4 x float> %mul, %add
+  ret <4 x float> %res
+}
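
The cttz test added here exercises a different consumer of the same
information: a no-wrap left shift of a non-zero value is itself non-zero,
because discarding the last set bit would be exactly the wrap the flag rules
out, and that lets the cttz lowering drop its zero guard (rep bsf without a
test/je). A plain C++ (non-LLVM) check of the underlying invariant for the
i16 constant 256 used by the test:

    #include <cassert>
    #include <cstdint>

    int main() {
      // 256 << s fits in 16 unsigned bits for s <= 7, i.e. every shift
      // amount permitted under nuw; none of these results can be zero.
      for (unsigned s = 0; s <= 7; ++s) {
        uint16_t v = static_cast<uint16_t>(256u << s);
        assert(v != 0 && "nuw shift of a non-zero value stays non-zero");
      }
      return 0;
    }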

>From addd9c034e7e3dcd603d50c6927f558ab614cda9 Mon Sep 17 00:00:00 2001
From: zxc12523 <danzxc910624 at gmail.com>
Date: Mon, 29 Apr 2024 12:33:05 +0800
Subject: [PATCH 4/4] Test individual nuw/nsw flags & add expected failures to
 other .ll tests

---
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |   2 +-
 .../X86/fold-int-pow2-with-fmul-or-fdiv.ll    |   1 +
 llvm/test/CodeGen/X86/known-never-zero.ll     |   1 +
 llvm/test/CodeGen/X86/pr89877.ll              | 181 ++++++++----------
 4 files changed, 88 insertions(+), 97 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index bde6d654fee347..88394ec310260b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3498,7 +3498,7 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
 
     bool NUW = Op->getFlags().hasNoUnsignedWrap();
     bool NSW = Op->getFlags().hasNoSignedWrap();
-    
+
     bool ShAmtNonZero = Known2.isNonZero();
 
     Known = KnownBits::shl(Known, Known2, NUW, NSW, ShAmtNonZero);
diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index dadcc68c5dcf43..627795d555da9f 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512F,CHECK-NO-FASTFMA
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -fp-contract=fast | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512F,CHECK-FMA
+; XFAIL: *
 
 declare i16 @llvm.umax.i16(i16, i16)
 declare i64 @llvm.umin.i64(i64, i64)
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index 39d02f9112f4fc..eb1e13e8914853 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
+; XFAIL: *
 
 ;; Use cttz to test if we properly prove never-zero. There is a very
 ;; simple transform from cttz -> cttz_zero_undef if its operand is
diff --git a/llvm/test/CodeGen/X86/pr89877.ll b/llvm/test/CodeGen/X86/pr89877.ll
index 1882c4a665da80..2a313268f2bc89 100644
--- a/llvm/test/CodeGen/X86/pr89877.ll
+++ b/llvm/test/CodeGen/X86/pr89877.ll
@@ -1,10 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK-NO-FASTFMA
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -fp-contract=fast | FileCheck %s --check-prefixes=CHECK-FMA
 
 define i32 @sext_known_nonzero(i16 %xx) {
 ; X86-LABEL: sext_known_nonzero:
@@ -12,115 +8,108 @@ define i32 @sext_known_nonzero(i16 %xx) {
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl $256, %eax # imm = 0x100
 ; X86-NEXT:    shll %cl, %eax
-; X86-NEXT:    movzwl	%ax, %eax
+; X86-NEXT:    cwtl
+; X86-NEXT:    testl	%eax, %eax
+; X86-NEXT:    je	.LBB0_1
+; X86-NEXT:  # %bb.2:                                # %cond.false
+; X86-NEXT:	   rep		bsfl	%eax, %eax
+; X86-NEXT:	   retl
+; X86-NEXT:	 .LBB0_1:
+; X86-NEXT:	   movl	$32, %eax
+; X86-NEXT:	   retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64:       # %bb.0:
+; X64-NEXT:    movl	%edi, %ecx
+; X64-NEXT:    movl	$256, %eax                      # imm = 0x100
+; X64-NEXT:                                           # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll	%cl, %eax
+; X64-NEXT:    cwtl
+; X64-NEXT:    testl	%eax, %eax
+; X64-NEXT:    je	.LBB0_1
+; X64-NEXT: # %bb.2:                                # %cond.false
+; X64-NEXT:	   rep		bsfl	%eax, %eax
+; X64-NEXT:	   retq
+; X64-NEXT: .LBB0_1:
+; X64-NEXT:	   movl	$32, %eax
+; X64-NEXT:	   retq
+  %x = shl i16 256, %xx
+  %z = sext i16 %x to i32
+  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+  ret i32 %r
+}
+
+define i32 @sext_known_nonzero_nuw(i16 %xx) {
+; X86-LABEL: sext_known_nonzero_nuw:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $256, %eax # imm = 0x100
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    cwtl
 ; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: sext_known_nonzero:
+; X64-LABEL: sext_known_nonzero_nuw:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %ecx
 ; X64-NEXT:    movl $256, %eax # imm = 0x100
 ; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
 ; X64-NEXT:    shll %cl, %eax
-; X64-NEXT:    movzwl	%ax, %eax
+; X64-NEXT:    cwtl
 ; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
-  %x = shl nuw nsw i16 256, %xx
+  %x = shl nuw i16 256, %xx
   %z = sext i16 %x to i32
   %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
   ret i32 %r
 }
 
-define <2 x float> @fmul_pow_shl_cnt_vec_fail_expensive_cast(<2 x i64> %cnt) nounwind {
-; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
-; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; CHECK-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2,2]
-; CHECK-SSE-NEXT:    movdqa	%xmm2, %xmm3
-; CHECK-SSE-NEXT:    psllq	%xmm1, %xmm3
-; CHECK-SSE-NEXT:    psllq	%xmm0, %xmm2
-; CHECK-SSE-NEXT:    movq	%xmm2, %rax
-; CHECK-SSE-NEXT:    xorps	%xmm0, %xmm0
-; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm0
-; CHECK-SSE-NEXT:    pshufd	$238, %xmm3, %xmm1              # xmm1 = xmm3[2,3,2,3]
-; CHECK-SSE-NEXT:    movq	%xmm1, %rax
-; CHECK-SSE-NEXT:    xorps	%xmm1, %xmm1
-; CHECK-SSE-NEXT:    cvtsi2ss	%rax, %xmm1
-; CHECK-SSE-NEXT:    unpcklps	%xmm1, %xmm0                    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-;
-; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
-; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:	vpmovsxbq {{.*#+}} xmm1 = [2,2]
-; CHECK-AVX2-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-AVX2-NEXT:	vpextrq	$1, %xmm0, %rax
-; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
-; CHECK-AVX2-NEXT:	vmovq	%xmm0, %rax
-; CHECK-AVX2-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
-; CHECK-AVX2-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; CHECK-AVX2-NEXT:	vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-AVX2-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:	retq
-;
-; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
-; CHECK-NO-FASTFMA:       # %bb.0:
-; CHECK-NO-FASTFMA-NEXT:	vpmovsxbq	{{.*#+}} xmm1 = [2,2]
-; CHECK-NO-FASTFMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	vpextrq	$1, %xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm1
-; CHECK-NO-FASTFMA-NEXT:	vmovq	%xmm0, %rax
-; CHECK-NO-FASTFMA-NEXT:	vcvtsi2ss	%rax, %xmm2, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	vinsertps	{{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
-; CHECK-NO-FASTFMA-NEXT:	vbroadcastss	{{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1]
-; CHECK-NO-FASTFMA-NEXT:	vmulps	%xmm1, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:	retq
+define i32 @sext_known_nonzero_nsw(i16 %xx) {
+; X86-LABEL: sext_known_nonzero_nsw:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $256, %eax # imm = 0x100
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    movzwl	%ax, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
+; X86-NEXT:    retl
 ;
-; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast:
-; CHECK-FMA:       # %bb.0:
-; CHECK-FMA-NEXT:	vpbroadcastq	{{.*#+}} xmm1 = [2,2]
-; CHECK-FMA-NEXT:	vpsllvq	%xmm0, %xmm1, %xmm0
-; CHECK-FMA-NEXT:	vcvtqq2ps	%xmm0, %xmm0
-; CHECK-FMA-NEXT:	vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK-FMA-NEXT:	retq
-  %shl = shl nsw nuw <2 x i64> <i64 2, i64 2>, %cnt
-  %conv = uitofp <2 x i64> %shl to <2 x float>
-  %mul = fmul <2 x float> <float 15.000000e+00, float 15.000000e+00>, %conv
-  ret <2 x float> %mul
+; X64-LABEL: sext_known_nonzero_nsw:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movl $256, %eax # imm = 0x100
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    movzwl	%ax, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
+; X64-NEXT:    retq
+  %x = shl nsw i16 256, %xx
+  %z = sext i16 %x to i32
+  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+  ret i32 %r
 }
 
-define <4 x float> @fmul_pow_shl_cnt_vec_preserve_fma(<4 x i32> %cnt, <4 x float> %add) nounwind {
-; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
-; CHECK-SSE:       # %bb.0:
-; CHECK-SSE-NEXT:    pslld $23, %xmm0
-; CHECK-SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE-NEXT:    addps %xmm1, %xmm0
-; CHECK-SSE-NEXT:    retq
-;
-; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
-; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpslld $23, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1092616192,1092616192,1092616192,1092616192]
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vaddps %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    retq
-;
-; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
-; CHECK-NO-FASTFMA:       # %bb.0:
-; CHECK-NO-FASTFMA-NEXT:    vpslld $23, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1092616192,1092616192,1092616192,1092616192]
-; CHECK-NO-FASTFMA-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    vaddps %xmm1, %xmm0, %xmm0
-; CHECK-NO-FASTFMA-NEXT:    retq
+define i32 @sext_known_nonzero_nuw_nsw(i16 %xx) {
+; X86-LABEL: sext_known_nonzero_nuw_nsw:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl $256, %eax # imm = 0x100
+; X86-NEXT:    shll %cl, %eax
+; X86-NEXT:    movzwl	%ax, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
+; X86-NEXT:    retl
 ;
-; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma:
-; CHECK-FMA:       # %bb.0:
-; CHECK-FMA-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
-; CHECK-FMA-NEXT:    vpsllvd %xmm0, %xmm2, %xmm0
-; CHECK-FMA-NEXT:	   vcvtdq2ps	%xmm0, %xmm0
-; CHECK-FMA-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
-; CHECK-FMA-NEXT:    retq
-  %shl = shl nsw nuw <4 x i32> <i32 2, i32 2, i32 2, i32 2>, %cnt
-  %conv = uitofp <4 x i32> %shl to <4 x float>
-  %mul = fmul <4 x float> <float 5.000000e+00, float 5.000000e+00, float 5.000000e+00, float 5.000000e+00>, %conv
-  %res = fadd <4 x float> %mul, %add
-  ret <4 x float> %res
+; X64-LABEL: sext_known_nonzero_nuw_nsw:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movl $256, %eax # imm = 0x100
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    shll %cl, %eax
+; X64-NEXT:    movzwl	%ax, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
+; X64-NEXT:    retq
+  %x = shl nuw nsw i16 256, %xx
+  %z = sext i16 %x to i32
+  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+  ret i32 %r
 }
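
A note on the per-flag variants above: nsw alone already pins the sign bit of
the shifted i16 to zero (256 << s stays non-negative for every nsw-legal s),
so the sext lowers as a zero extension (movzwl), while nuw alone still allows
256 << 7 = 0x8000 and therefore keeps the true sign extension (cwtl); in both
cases the value is known non-zero and the cttz zero test disappears. A
simplified sketch of the extend fold involved (condensed from the classic
DAGCombiner sign-extend combine; the in-tree code carries more structure):

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // If the narrow value's sign bit is known zero, sign extension and zero
    // extension produce the same bits.
    static SDValue foldSExtToZExt(SelectionDAG &DAG, SDValue N0,
                                  const SDLoc &DL, EVT VT) {
      if (DAG.SignBitIsZero(N0))
        return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);
      return SDValue(); // keep the sign_extend
    }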



More information about the llvm-commits mailing list