[llvm] 813459e - [X86] combineSelect fold 'smin' style pattern select(pcmpgt(RHS, LHS), LHS, RHS) -> select(pcmpgt(LHS, RHS), RHS, LHS) if pcmpgt(LHS, RHS) already exists

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jul 30 07:32:03 PDT 2022


Author: Simon Pilgrim
Date: 2022-07-30T15:31:36+01:00
New Revision: 813459ed2b0bb48b1236e2e06849b1c7526a2dfe

URL: https://github.com/llvm/llvm-project/commit/813459ed2b0bb48b1236e2e06849b1c7526a2dfe
DIFF: https://github.com/llvm/llvm-project/commit/813459ed2b0bb48b1236e2e06849b1c7526a2dfe.diff

LOG: [X86] combineSelect fold 'smin' style pattern select(pcmpgt(RHS, LHS), LHS, RHS) -> select(pcmpgt(LHS, RHS), RHS, LHS) if pcmpgt(LHS, RHS) already exists

Avoids repeated commuted comparisons when performing min/max and clamp patterns.
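
To illustrate the pattern being targeted, here is a hedged, scaled-down sketch in the spirit of the concat_smin_smax test in vselect-minmax.ll (the function name and vector widths below are made up for illustration and are not part of the patch). Without a native 64-bit vector signed min/max (pre-AVX512), both selects are lowered via PCMPGTQ + BLENDVPD. The smin select matches select(pcmpgt(%b, %a), %a, %b), and since pcmpgt(%a, %b) already exists for the smax, the new fold commutes the smin select to select(pcmpgt(%a, %b), %b, %a) so only one comparison should be emitted.

  define <4 x i64> @concat_smin_smax_sketch(<2 x i64> %a, <2 x i64> %b) {
    ; smin lowers as select(pcmpgt(%b, %a), %a, %b)
    %lt = icmp slt <2 x i64> %a, %b
    %smin = select <2 x i1> %lt, <2 x i64> %a, <2 x i64> %b
    ; smax lowers as select(pcmpgt(%a, %b), %a, %b)
    %gt = icmp sgt <2 x i64> %a, %b
    %smax = select <2 x i1> %gt, <2 x i64> %a, <2 x i64> %b
    %res = shufflevector <2 x i64> %smin, <2 x i64> %smax, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
    ret <4 x i64> %res
  }

The updated AVX2 check lines in vselect-minmax.ll show the effect: a single vpcmpgtq feeding two vblendvpd instructions with swapped select operands, instead of two commuted vpcmpgtq comparisons.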

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
    llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
    llvm/test/CodeGen/X86/vselect-minmax.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bcb58d30335ae..a53995e6df7fc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -44857,12 +44857,29 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
     if (SDValue CondNot = IsNOT(Cond, DAG))
       return DAG.getNode(N->getOpcode(), DL, VT,
                          DAG.getBitcast(CondVT, CondNot), RHS, LHS);
-    // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the signbit.
-    if (Cond.getOpcode() == X86ISD::PCMPGT && Cond.hasOneUse() &&
-        ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode())) {
-      Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
-                         DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
-      return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
+
+    if (Cond.getOpcode() == X86ISD::PCMPGT && Cond.hasOneUse()) {
+      // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
+      // signbit.
+      if (ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode())) {
+        Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
+                           DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
+        return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
+      }
+
+      // smin(LHS, RHS) : select(pcmpgt(RHS, LHS), LHS, RHS)
+      //               -> select(pcmpgt(LHS, RHS), RHS, LHS)
+      // iff the commuted pcmpgt() already exists.
+      // TODO: Could DAGCombiner::combine cse search for SETCC nodes, like it
+      // does for commutative binops?
+      if (Cond.getOperand(0) == RHS && Cond.getOperand(1) == LHS) {
+        if (SDNode *FlipCond =
+                DAG.getNodeIfExists(X86ISD::PCMPGT, DAG.getVTList(CondVT),
+                                    {Cond.getOperand(1), Cond.getOperand(0)})) {
+          return DAG.getNode(N->getOpcode(), DL, VT, SDValue(FlipCond, 0), RHS,
+                             LHS);
+        }
+      }
     }
   }
 

diff  --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index 9ea05e1fef601..3b7bd3aa079c1 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -956,8 +956,7 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
 ; AVX1-FALLBACK:       # %bb.0:
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
@@ -976,8 +975,7 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
 ; AVX2-FALLBACK:       # %bb.0:
 ; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
@@ -1401,8 +1399,7 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
 ; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm4
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm2
@@ -1422,8 +1419,7 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
 ; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
 ; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm4
 ; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
 ; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm2
@@ -1624,8 +1620,7 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
@@ -1645,8 +1640,7 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
@@ -1850,8 +1844,7 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
@@ -1872,8 +1865,7 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm4
 ; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
 ; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2

diff  --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index 8bbfc0adb7850..1e5b0fa05ef36 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -478,34 +478,32 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
 ; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm9
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm4, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm9, %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm9
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; AVX1-FALLBACK-NEXT:    vpor %xmm2, %xmm8, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm7, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm7, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm9, %xmm5
+; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm7, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm9, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm6, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm9, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
@@ -518,8 +516,7 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
 ; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
-; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm4
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm2
@@ -909,34 +906,32 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
 ; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm9
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm6
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm2, %xmm4, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm9, %xmm2, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm4, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm4
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm4, %xmm4
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm3, %xmm1, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm9
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; AVX1-FALLBACK-NEXT:    vpor %xmm3, %xmm8, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm7, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm7, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm9, %xmm5
+; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm9, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm6, %xmm4
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm9, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
@@ -950,8 +945,7 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
 ; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm4
-; AVX2-NEXT:    vblendvpd %ymm4, %ymm1, %ymm0, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm4
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsubq %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpsrlq $1, %ymm0, %ymm2
@@ -1125,34 +1119,32 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
 ; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm9
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm4, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm9, %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm2, %xmm4, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm3, %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm4, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm9
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; AVX1-FALLBACK-NEXT:    vpor %xmm3, %xmm8, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm7, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm7, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm9, %xmm5
+; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm9, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm9, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
@@ -1166,8 +1158,7 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
 ; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
-; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm4
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm2
@@ -1337,44 +1328,42 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm9
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm9, %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm9
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm7, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm9, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm9, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm9, %xmm5
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_mem:
@@ -1384,8 +1373,7 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
 ; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
-; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm4
 ; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
 ; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm2

diff  --git a/llvm/test/CodeGen/X86/vselect-minmax.ll b/llvm/test/CodeGen/X86/vselect-minmax.ll
index e8485ef3d6366..fcabb8f461062 100644
--- a/llvm/test/CodeGen/X86/vselect-minmax.ll
+++ b/llvm/test/CodeGen/X86/vselect-minmax.ll
@@ -10284,7 +10284,7 @@ entry:
   ret <2 x i64> %sel
 }
 
-; FIXME: comparisons for smin/smax patterns can be reused
+; comparisons for smin/smax patterns can be reused
 define <8 x i64> @concat_smin_smax(<4 x i64> %a0, <4 x i64> %a1) {
 ; SSE2-LABEL: concat_smin_smax:
 ; SSE2:       # %bb.0:
@@ -10346,45 +10346,41 @@ define <8 x i64> @concat_smin_smax(<4 x i64> %a0, <4 x i64> %a1) {
 ;
 ; SSE4-LABEL: concat_smin_smax:
 ; SSE4:       # %bb.0:
-; SSE4-NEXT:    movdqa %xmm0, %xmm4
-; SSE4-NEXT:    movdqa %xmm2, %xmm0
-; SSE4-NEXT:    pcmpgtq %xmm4, %xmm0
-; SSE4-NEXT:    movdqa %xmm2, %xmm5
-; SSE4-NEXT:    blendvpd %xmm0, %xmm4, %xmm5
-; SSE4-NEXT:    movdqa %xmm3, %xmm0
-; SSE4-NEXT:    pcmpgtq %xmm1, %xmm0
-; SSE4-NEXT:    movdqa %xmm3, %xmm6
-; SSE4-NEXT:    blendvpd %xmm0, %xmm1, %xmm6
-; SSE4-NEXT:    movdqa %xmm4, %xmm0
-; SSE4-NEXT:    pcmpgtq %xmm2, %xmm0
-; SSE4-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
-; SSE4-NEXT:    movdqa %xmm1, %xmm0
-; SSE4-NEXT:    pcmpgtq %xmm3, %xmm0
+; SSE4-NEXT:    movdqa %xmm0, %xmm8
+; SSE4-NEXT:    movdqa %xmm0, %xmm5
+; SSE4-NEXT:    pcmpgtq %xmm2, %xmm5
+; SSE4-NEXT:    movdqa %xmm0, %xmm6
+; SSE4-NEXT:    movdqa %xmm5, %xmm0
+; SSE4-NEXT:    blendvpd %xmm0, %xmm2, %xmm6
+; SSE4-NEXT:    movdqa %xmm1, %xmm7
+; SSE4-NEXT:    pcmpgtq %xmm3, %xmm7
+; SSE4-NEXT:    movdqa %xmm1, %xmm4
+; SSE4-NEXT:    movdqa %xmm7, %xmm0
+; SSE4-NEXT:    blendvpd %xmm0, %xmm3, %xmm4
+; SSE4-NEXT:    movdqa %xmm5, %xmm0
+; SSE4-NEXT:    blendvpd %xmm0, %xmm8, %xmm2
+; SSE4-NEXT:    movdqa %xmm7, %xmm0
 ; SSE4-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
-; SSE4-NEXT:    movapd %xmm5, %xmm0
-; SSE4-NEXT:    movapd %xmm6, %xmm1
+; SSE4-NEXT:    movapd %xmm6, %xmm0
+; SSE4-NEXT:    movapd %xmm4, %xmm1
 ; SSE4-NEXT:    retq
 ;
 ; AVX1-LABEL: concat_smin_smax:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm2
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm3
+; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm0, %ymm2
 ; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm1
 ; AVX1-NEXT:    vmovapd %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: concat_smin_smax:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
-; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm2
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vblendvpd %ymm3, %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm1
 ; AVX2-NEXT:    vmovapd %ymm2, %ymm0
 ; AVX2-NEXT:    retq

