[llvm] [X86] combinePCLMULQDQ - attempt to fold PCLMULQDQ(SHUFFLE(X),SHUFFLE(Y),C) -> PCLMULQDQ(X,Y,C') (REAPPLIED) (PR #177454)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 22 13:50:46 PST 2026


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/177454

From 292d613874ce5604a30e898a9217eb0a16aac481 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Thu, 22 Jan 2026 20:21:14 +0000
Subject: [PATCH] [X86] combinePCLMULQDQ - attempt to fold
 PCLMULQDQ(SHUFFLE(X),SHUFFLE(Y),C) -> PCLMULQDQ(X,Y,C') (REAPPLIED)

Peek through the input shuffle operands and see whether we can access the shuffle source directly with an adjusted PCLMULQDQ mask bit.

REAPPLIED after #176932 was reverted: using is128BitLaneRepeatedShuffleMask instead of isRepeatedTargetShuffleMask caused failures with SM_SentinelZero shuffle mask indices - a regression test was added by #177414.

Fixes #176880
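
For reference, the equivalence this fold exploits can be checked directly with the PCLMULQDQ intrinsics: bit 0 of the immediate selects the lo/hi quadword of the first operand and bit 4 selects the lo/hi quadword of the second, so a quadword-swapping shuffle on an input can instead be folded into the corresponding immediate bit. A minimal standalone sketch (helper names are illustrative, not the backend code; assumes a PCLMUL-capable host, build with e.g. -mpclmul):

  #include <emmintrin.h>
  #include <wmmintrin.h>
  #include <assert.h>

  /* Swap the two 64-bit halves of a 128-bit vector. */
  static __m128i swap_qwords(__m128i v) {
    return _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 0, 3, 2));
  }

  int main(void) {
    __m128i x = _mm_set_epi64x(0x0123456789abcdefULL, 0xfedcba9876543210ULL);
    __m128i y = _mm_set_epi64x(0x0f0f0f0f0f0f0f0fULL, 0xf0f0f0f0f0f0f0f0ULL);

    /* PCLMULQDQ(SHUFFLE(X),Y,0x00) == PCLMULQDQ(X,Y,0x01): the lo qword
       of a qword-swapped X is the hi qword of X. */
    __m128i a = _mm_clmulepi64_si128(swap_qwords(x), y, 0x00);
    __m128i b = _mm_clmulepi64_si128(x, y, 0x01);
    assert(_mm_movemask_epi8(_mm_cmpeq_epi8(a, b)) == 0xFFFF);

    /* The second operand maps to bit 4 of the immediate the same way. */
    __m128i c = _mm_clmulepi64_si128(x, swap_qwords(y), 0x00);
    __m128i d = _mm_clmulepi64_si128(x, y, 0x10);
    assert(_mm_movemask_epi8(_mm_cmpeq_epi8(c, d)) == 0xFFFF);
    return 0;
  }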
---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  32 ++-
 llvm/test/CodeGen/X86/clmul-vector.ll   | 306 ++++++++++--------------
 llvm/test/CodeGen/X86/combine-pclmul.ll |  12 +-
 llvm/test/CodeGen/X86/pclmulqdq.ll      | 240 +++++++++----------
 4 files changed, 271 insertions(+), 319 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e54f4ed2fb26c..7a661225404b1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -61760,7 +61760,37 @@ static SDValue combineBMI(SDNode *N, SelectionDAG &DAG,
 
 static SDValue combinePCLMULQDQ(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
-  unsigned NumElts = N->getSimpleValueType(0).getVectorNumElements();
+  MVT VT = N->getSimpleValueType(0);
+  unsigned NumElts = VT.getVectorNumElements();
+  assert(VT.getVectorElementType() == MVT::i64 && "vXi64 type expected");
+
+  // Use the PCLMULQDQ lo/hi mask to attempt to remove an unnecessary shuffle,
+  // rescaled back to vXi64, repeating every v2i64 sublane.
+  auto SimplifyOperand = [&](SDValue &Op, uint64_t &M) {
+    SmallVector<SDValue, 2> Src;
+    SmallVector<int, 16> Mask, ScaledMask, RepeatedMask;
+    if (!getTargetShuffleInputs(peekThroughBitcasts(Op), Src, Mask, DAG) ||
+        Src.size() != 1 || Src[0].getValueSizeInBits() != VT.getSizeInBits() ||
+        !scaleShuffleMaskElts(NumElts, Mask, ScaledMask) ||
+        !isRepeatedTargetShuffleMask(128, VT, ScaledMask, RepeatedMask) ||
+        !isInRange(RepeatedMask[M & 1], 0, 2))
+      return false;
+    Op = DAG.getBitcast(VT, Src[0]);
+    M = RepeatedMask[M & 1];
+    return true;
+  };
+
+  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
+  uint64_t Mask = N->getConstantOperandVal(2);
+  uint64_t M0 = Mask & 0x01, M1 = (Mask & 0x10) >> 4;
+  bool Simplify0 = SimplifyOperand(N0, M0);
+  bool Simplify1 = SimplifyOperand(N1, M1);
+  if (Simplify0 || Simplify1) {
+    SDLoc DL(N);
+    return DAG.getNode(X86ISD::PCLMULQDQ, DL, VT, N0, N1,
+                       DAG.getTargetConstant((M1 << 4) | M0, DL, MVT::i8));
+  }
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), APInt::getAllOnes(NumElts),
                                      DCI))
diff --git a/llvm/test/CodeGen/X86/clmul-vector.ll b/llvm/test/CodeGen/X86/clmul-vector.ll
index a7483dca32a02..f88f5c3646bf8 100644
--- a/llvm/test/CodeGen/X86/clmul-vector.ll
+++ b/llvm/test/CodeGen/X86/clmul-vector.ll
@@ -855,38 +855,37 @@ define <4 x i32> @clmul_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; SSE2-PCLMUL-LABEL: clmul_v4i32:
 ; SSE2-PCLMUL:       # %bb.0:
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm3
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm4
-; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSE2-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
+; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm3, %xmm4
+; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
 ; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
 ; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
+; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm3, %xmm1
 ; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-PCLMUL-NEXT:    retq
 ;
 ; SSE42-LABEL: clmul_v4i32:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
-; SSE42-NEXT:    pclmulqdq $0, %xmm2, %xmm3
-; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE42-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; SSE42-NEXT:    pclmulqdq $0, %xmm3, %xmm2
-; SSE42-NEXT:    movq %xmm2, %rax
-; SSE42-NEXT:    pinsrd $2, %eax, %xmm0
-; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm4
-; SSE42-NEXT:    movq %xmm4, %rax
-; SSE42-NEXT:    pinsrd $3, %eax, %xmm0
+; SSE42-NEXT:    movdqa %xmm0, %xmm2
+; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm2
+; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
+; SSE42-NEXT:    pclmulqdq $0, %xmm3, %xmm4
+; SSE42-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; SSE42-NEXT:    pclmulqdq $17, %xmm1, %xmm0
+; SSE42-NEXT:    movq %xmm0, %rax
+; SSE42-NEXT:    pinsrd $2, %eax, %xmm2
+; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3]
+; SSE42-NEXT:    pclmulqdq $0, %xmm3, %xmm0
+; SSE42-NEXT:    movq %xmm0, %rax
+; SSE42-NEXT:    pinsrd $3, %eax, %xmm2
+; SSE42-NEXT:    movdqa %xmm2, %xmm0
 ; SSE42-NEXT:    retq
 ;
 ; AVX2-LABEL: clmul_v4i32:
@@ -896,9 +895,7 @@ define <4 x i32> @clmul_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX2-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm3
 ; AVX2-NEXT:    vmovq %xmm3, %rax
 ; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
@@ -915,9 +912,7 @@ define <4 x i32> @clmul_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
 ; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm3
 ; AVX512-NEXT:    vmovq %xmm3, %rax
 ; AVX512-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
@@ -1360,29 +1355,24 @@ define <2 x i64> @clmul_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ;
 ; SSE-PCLMUL-LABEL: clmul_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
 ; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
-; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: clmul_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: clmul_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512-NEXT:    retq
   %res = call <2 x i64> @llvm.clmul.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %res
@@ -2512,20 +2502,19 @@ define <4 x i32> @clmulr_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; SSE2-PCLMUL-LABEL: clmulr_v4i32:
 ; SSE2-PCLMUL:       # %bb.0:
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm3
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm4
-; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE2-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
+; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm3, %xmm4
+; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
 ; SSE2-PCLMUL-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm3
-; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
 ; SSE2-PCLMUL-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,1,1]
-; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm5
+; SSE2-PCLMUL-NEXT:    pclmulqdq $0, %xmm4, %xmm5
 ; SSE2-PCLMUL-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE2-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
 ; SSE2-PCLMUL-NEXT:    psrld $31, %xmm3
 ; SSE2-PCLMUL-NEXT:    pxor %xmm2, %xmm2
 ; SSE2-PCLMUL-NEXT:    movdqa %xmm1, %xmm4
@@ -2573,50 +2562,49 @@ define <4 x i32> @clmulr_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2,3,4,5,6,7]
 ; SSE42-NEXT:    pclmulqdq $0, %xmm2, %xmm3
 ; SSE42-NEXT:    movq %xmm3, %rax
-; SSE42-NEXT:    movdqa %xmm1, %xmm3
+; SSE42-NEXT:    movdqa %xmm0, %xmm3
 ; SSE42-NEXT:    movdqa %xmm1, %xmm4
+; SSE42-NEXT:    movdqa %xmm0, %xmm5
+; SSE42-NEXT:    movdqa %xmm1, %xmm6
+; SSE42-NEXT:    movdqa %xmm0, %xmm7
 ; SSE42-NEXT:    movdqa %xmm0, %xmm2
 ; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm2
-; SSE42-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[3,3,3,3]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm10 = xmm1[1,1,1,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm8 = xmm0[1,1,1,1]
+; SSE42-NEXT:    pshufd {{.*#+}} xmm9 = xmm0[3,3,3,3]
+; SSE42-NEXT:    pclmulqdq $17, %xmm1, %xmm0
+; SSE42-NEXT:    pshufd {{.*#+}} xmm11 = xmm1[3,3,3,3]
 ; SSE42-NEXT:    psrlq $32, %xmm1
-; SSE42-NEXT:    movdqa %xmm0, %xmm8
-; SSE42-NEXT:    movdqa %xmm0, %xmm9
-; SSE42-NEXT:    pshufd {{.*#+}} xmm10 = xmm0[1,1,1,1]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm11 = xmm0[2,3,2,3]
-; SSE42-NEXT:    pshufd {{.*#+}} xmm12 = xmm0[3,3,3,3]
-; SSE42-NEXT:    psrlq $32, %xmm0
-; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE42-NEXT:    movq %xmm0, %rcx
-; SSE42-NEXT:    pxor %xmm0, %xmm0
-; SSE42-NEXT:    punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE42-NEXT:    punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE42-NEXT:    pclmulqdq $0, %xmm3, %xmm8
-; SSE42-NEXT:    movq %xmm8, %rdx
-; SSE42-NEXT:    psrldq {{.*#+}} xmm4 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE42-NEXT:    psrldq {{.*#+}} xmm9 = xmm9[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE42-NEXT:    pclmulqdq $0, %xmm4, %xmm9
-; SSE42-NEXT:    movq %xmm9, %rsi
-; SSE42-NEXT:    pclmulqdq $0, %xmm7, %xmm10
+; SSE42-NEXT:    psrlq $32, %xmm3
+; SSE42-NEXT:    pclmulqdq $0, %xmm1, %xmm3
+; SSE42-NEXT:    movq %xmm3, %rcx
+; SSE42-NEXT:    pxor %xmm1, %xmm1
+; SSE42-NEXT:    punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSE42-NEXT:    punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; SSE42-NEXT:    pclmulqdq $0, %xmm4, %xmm5
+; SSE42-NEXT:    movq %xmm5, %rdx
+; SSE42-NEXT:    psrldq {{.*#+}} xmm6 = xmm6[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE42-NEXT:    psrldq {{.*#+}} xmm7 = xmm7[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE42-NEXT:    pclmulqdq $0, %xmm6, %xmm7
+; SSE42-NEXT:    movq %xmm7, %rsi
 ; SSE42-NEXT:    shrq $32, %rax
-; SSE42-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1]
-; SSE42-NEXT:    movd %eax, %xmm0
+; SSE42-NEXT:    pclmulqdq $0, %xmm10, %xmm8
+; SSE42-NEXT:    movd %eax, %xmm1
 ; SSE42-NEXT:    shrq $32, %rcx
-; SSE42-NEXT:    pinsrd $1, %ecx, %xmm0
+; SSE42-NEXT:    pinsrd $1, %ecx, %xmm1
 ; SSE42-NEXT:    shrq $32, %rdx
-; SSE42-NEXT:    pinsrd $2, %edx, %xmm0
+; SSE42-NEXT:    pinsrd $2, %edx, %xmm1
 ; SSE42-NEXT:    shrq $32, %rsi
-; SSE42-NEXT:    pinsrd $3, %esi, %xmm0
-; SSE42-NEXT:    paddd %xmm0, %xmm0
-; SSE42-NEXT:    pclmulqdq $0, %xmm5, %xmm11
-; SSE42-NEXT:    movq %xmm11, %rax
+; SSE42-NEXT:    pinsrd $3, %esi, %xmm1
+; SSE42-NEXT:    paddd %xmm1, %xmm1
+; SSE42-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; SSE42-NEXT:    movq %xmm0, %rax
 ; SSE42-NEXT:    pinsrd $2, %eax, %xmm2
-; SSE42-NEXT:    pclmulqdq $0, %xmm6, %xmm12
-; SSE42-NEXT:    movq %xmm12, %rax
+; SSE42-NEXT:    pclmulqdq $0, %xmm9, %xmm11
+; SSE42-NEXT:    movq %xmm11, %rax
 ; SSE42-NEXT:    pinsrd $3, %eax, %xmm2
 ; SSE42-NEXT:    psrld $31, %xmm2
-; SSE42-NEXT:    por %xmm0, %xmm2
+; SSE42-NEXT:    por %xmm1, %xmm2
 ; SSE42-NEXT:    movdqa %xmm2, %xmm0
 ; SSE42-NEXT:    retq
 ;
@@ -2653,9 +2641,7 @@ define <4 x i32> @clmulr_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,1,1]
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
-; AVX2-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm4
 ; AVX2-NEXT:    vmovq %xmm4, %rax
 ; AVX2-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
@@ -2701,9 +2687,7 @@ define <4 x i32> @clmulr_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,1,1]
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
 ; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm4
 ; AVX512-NEXT:    vmovq %xmm4, %rax
 ; AVX512-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
@@ -3301,40 +3285,35 @@ define <2 x i64> @clmulr_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ;
 ; SSE-PCLMUL-LABEL: clmulr_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
 ; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
-; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
-; SSE-PCLMUL-NEXT:    paddq %xmm2, %xmm2
-; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
+; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm1
+; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-PCLMUL-NEXT:    paddq %xmm1, %xmm1
+; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE-PCLMUL-NEXT:    psrlq $63, %xmm0
-; SSE-PCLMUL-NEXT:    por %xmm2, %xmm0
+; SSE-PCLMUL-NEXT:    por %xmm1, %xmm0
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: clmulr_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm0[1],xmm2[1]
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vpsrlq $63, %xmm0, %xmm0
 ; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: clmulr_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm0[1],xmm2[1]
 ; AVX512-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512-NEXT:    vpsrlq $63, %xmm0, %xmm0
 ; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -5193,29 +5172,24 @@ define <2 x i64> @clmulh_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ;
 ; SSE-PCLMUL-LABEL: clmulh_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
 ; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
-; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: clmulh_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: clmulh_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX512-NEXT:    retq
   %a.ext = zext <2 x i64> %a to <2 x i128>
   %b.ext = zext <2 x i64> %b to <2 x i128>
@@ -5793,33 +5767,28 @@ define void @commutative_clmul_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0, ptr %p
 ;
 ; SSE-PCLMUL-LABEL: commutative_clmul_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
 ; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
-; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE-PCLMUL-NEXT:    movdqa %xmm0, (%rdi)
 ; SSE-PCLMUL-NEXT:    movdqa %xmm0, (%rsi)
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: commutative_clmul_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: commutative_clmul_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX512-NEXT:    retq
@@ -6518,33 +6487,28 @@ define void @commutative_clmulh_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0, ptr %
 ;
 ; SSE-PCLMUL-LABEL: commutative_clmulh_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE-PCLMUL-NEXT:    movdqa %xmm1, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm0, %xmm2
 ; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm0, %xmm1
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm0
-; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
 ; SSE-PCLMUL-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE-PCLMUL-NEXT:    movdqa %xmm1, (%rsi)
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: commutative_clmulh_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: commutative_clmulh_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1]
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rsi)
 ; AVX512-NEXT:    retq
@@ -7248,29 +7212,26 @@ define void @commutative_clmulr_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0, ptr %
 ;
 ; SSE-PCLMUL-LABEL: commutative_clmulr_v2i64:
 ; SSE-PCLMUL:       # %bb.0:
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm0, %xmm1
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm0
 ; SSE-PCLMUL-NEXT:    movdqa %xmm1, %xmm2
-; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-PCLMUL-NEXT:    paddq %xmm2, %xmm2
-; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm0, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm0, %xmm1
+; SSE-PCLMUL-NEXT:    movdqa %xmm1, %xmm0
+; SSE-PCLMUL-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; SSE-PCLMUL-NEXT:    paddq %xmm0, %xmm0
+; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; SSE-PCLMUL-NEXT:    psrlq $63, %xmm1
-; SSE-PCLMUL-NEXT:    por %xmm2, %xmm1
+; SSE-PCLMUL-NEXT:    por %xmm0, %xmm1
 ; SSE-PCLMUL-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE-PCLMUL-NEXT:    movdqa %xmm1, (%rsi)
 ; SSE-PCLMUL-NEXT:    retq
 ;
 ; AVX2-LABEL: commutative_clmulr_v2i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm0, %xmm1, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm0[1],xmm2[1]
 ; AVX2-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vpsrlq $63, %xmm0, %xmm0
 ; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
@@ -7279,13 +7240,11 @@ define void @commutative_clmulr_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0, ptr %
 ;
 ; AVX512-LABEL: commutative_clmulr_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm0[1],xmm2[1]
 ; AVX512-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512-NEXT:    vpsrlq $63, %xmm0, %xmm0
 ; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
@@ -7874,11 +7833,10 @@ define void @mul_use_commutative_clmul_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0
 ; SSE-PCLMUL-NEXT:    pushq %rbx
 ; SSE-PCLMUL-NEXT:    subq $16, %rsp
 ; SSE-PCLMUL-NEXT:    movq %rsi, %rbx
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-PCLMUL-NEXT:    movdqa %xmm0, %xmm2
+; SSE-PCLMUL-NEXT:    pclmulqdq $17, %xmm1, %xmm2
 ; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm1, %xmm0
-; SSE-PCLMUL-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-PCLMUL-NEXT:    pclmulqdq $0, %xmm2, %xmm1
-; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-PCLMUL-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE-PCLMUL-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
 ; SSE-PCLMUL-NEXT:    movdqa %xmm0, (%rdi)
; SSE-PCLMUL-NEXT:    callq use@PLT
@@ -7893,11 +7851,9 @@ define void @mul_use_commutative_clmul_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0
 ; AVX2-NEXT:    pushq %rbx
 ; AVX2-NEXT:    subq $16, %rsp
 ; AVX2-NEXT:    movq %rsi, %rbx
-; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
 ; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX2-NEXT:    callq use@PLT
@@ -7912,11 +7868,9 @@ define void @mul_use_commutative_clmul_v2i64(<2 x i64> %x, <2 x i64> %y, ptr %p0
 ; AVX512-NEXT:    pushq %rbx
 ; AVX512-NEXT:    subq $16, %rsp
 ; AVX512-NEXT:    movq %rsi, %rbx
-; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
 ; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
 ; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
; AVX512-NEXT:    callq use@PLT
diff --git a/llvm/test/CodeGen/X86/combine-pclmul.ll b/llvm/test/CodeGen/X86/combine-pclmul.ll
index 9101dfd90f1d4..4bb88636d4b60 100644
--- a/llvm/test/CodeGen/X86/combine-pclmul.ll
+++ b/llvm/test/CodeGen/X86/combine-pclmul.ll
@@ -78,9 +78,7 @@ define <8 x i64> @test_concat_pclmulqdq_v8i64_v4i64(<8 x i64> %a0, <8 x i64> %a1
 define <2 x i64> @test_shuffle_pclmulqdq_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_shuffle_pclmulqdq_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpbroadcastq %xmm0, %xmm0
-; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; CHECK-NEXT:    vpclmulqdq $1, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpclmulqdq $16, %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %s0 = shufflevector <2 x i64> %a0, <2 x i64> poison, <2 x i32> <i32 1, i32 0>
   %s1 = shufflevector <2 x i64> %a1, <2 x i64> poison, <2 x i32> <i32 1, i32 0>
@@ -91,9 +89,7 @@ define <2 x i64> @test_shuffle_pclmulqdq_v2i64(<2 x i64> %a0, <2 x i64> %a1) {
 define <4 x i64> @test_shuffle_pclmulqdq_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_shuffle_pclmulqdq_v4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; CHECK-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
-; CHECK-NEXT:    vpclmulqdq $16, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpclmulqdq $1, %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %s0 = shufflevector <4 x i64> %a0, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   %s1 = shufflevector <4 x i64> %a1, <4 x i64> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -104,9 +100,7 @@ define <4 x i64> @test_shuffle_pclmulqdq_v4i64(<4 x i64> %a0, <4 x i64> %a1) {
 define <8 x i64> @test_shuffle_pclmulqdq_v8i64(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_shuffle_pclmulqdq_v8i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpshufd {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
-; CHECK-NEXT:    vpshufd {{.*#+}} zmm1 = zmm1[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
-; CHECK-NEXT:    vpclmulqdq $17, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vpclmulqdq $0, %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %s0 = shufflevector <8 x i64> %a0, <8 x i64> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   %s1 = shufflevector <8 x i64> %a1, <8 x i64> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
diff --git a/llvm/test/CodeGen/X86/pclmulqdq.ll b/llvm/test/CodeGen/X86/pclmulqdq.ll
index 25322048b69f9..c72399c0633c9 100644
--- a/llvm/test/CodeGen/X86/pclmulqdq.ll
+++ b/llvm/test/CodeGen/X86/pclmulqdq.ll
@@ -8,16 +8,15 @@
 define <2 x i64> @pclmul128_lo_hi(<2 x i64> %v0, <2 x i64> %v1) {
 ; SSE-LABEL: pclmul128_lo_hi:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; SSE-NEXT:    xorl %eax, %eax
 ; SSE-NEXT:    movq %rax, %xmm2
 ; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    pclmulqdq $0, %xmm2, %xmm3
+; SSE-NEXT:    pclmulqdq $1, %xmm2, %xmm3
 ; SSE-NEXT:    movq %xmm3, %rax
 ; SSE-NEXT:    pclmulqdq $0, %xmm0, %xmm2
 ; SSE-NEXT:    movq %xmm2, %rcx
 ; SSE-NEXT:    xorq %rax, %rcx
-; SSE-NEXT:    pclmulqdq $0, %xmm1, %xmm0
+; SSE-NEXT:    pclmulqdq $16, %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; SSE-NEXT:    movq %xmm1, %rax
 ; SSE-NEXT:    xorq %rcx, %rax
@@ -27,15 +26,14 @@ define <2 x i64> @pclmul128_lo_hi(<2 x i64> %v0, <2 x i64> %v1) {
 ;
 ; AVX-LABEL: pclmul128_lo_hi:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX-NEXT:    xorl %eax, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm2
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm1, %xmm3
+; AVX-NEXT:    vpclmulqdq $1, %xmm2, %xmm1, %xmm3
 ; AVX-NEXT:    vmovq %xmm3, %rax
 ; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm0, %xmm2
 ; AVX-NEXT:    vmovq %xmm2, %rcx
 ; AVX-NEXT:    xorq %rax, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpclmulqdq $16, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    xorq %rcx, %rax
 ; AVX-NEXT:    vmovq %rax, %xmm1
@@ -55,17 +53,15 @@ define <2 x i64> @pclmul128_lo_hi(<2 x i64> %v0, <2 x i64> %v1) {
 define <2 x i64> @pclmul128_hi_hi(<2 x i64> %v0, <2 x i64> %v1) {
 ; SSE-LABEL: pclmul128_hi_hi:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; SSE-NEXT:    xorl %eax, %eax
 ; SSE-NEXT:    movq %rax, %xmm2
 ; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    pclmulqdq $0, %xmm2, %xmm3
+; SSE-NEXT:    pclmulqdq $1, %xmm2, %xmm3
 ; SSE-NEXT:    movq %xmm3, %rax
-; SSE-NEXT:    pclmulqdq $0, %xmm0, %xmm2
+; SSE-NEXT:    pclmulqdq $16, %xmm0, %xmm2
 ; SSE-NEXT:    movq %xmm2, %rcx
 ; SSE-NEXT:    xorq %rax, %rcx
-; SSE-NEXT:    pclmulqdq $0, %xmm1, %xmm0
+; SSE-NEXT:    pclmulqdq $17, %xmm1, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; SSE-NEXT:    movq %xmm1, %rax
 ; SSE-NEXT:    xorq %rcx, %rax
@@ -75,16 +71,14 @@ define <2 x i64> @pclmul128_hi_hi(<2 x i64> %v0, <2 x i64> %v1) {
 ;
 ; AVX-LABEL: pclmul128_hi_hi:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX-NEXT:    xorl %eax, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm2
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm1, %xmm3
+; AVX-NEXT:    vpclmulqdq $1, %xmm2, %xmm1, %xmm3
 ; AVX-NEXT:    vmovq %xmm3, %rax
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpclmulqdq $1, %xmm2, %xmm0, %xmm2
 ; AVX-NEXT:    vmovq %xmm2, %rcx
 ; AVX-NEXT:    xorq %rax, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    xorq %rcx, %rax
 ; AVX-NEXT:    vmovq %rax, %xmm1
@@ -183,27 +177,25 @@ define <4 x i64> @pclmul256_lo_lo(<4 x i64> %v0, <4 x i64> %v1) {
 define <4 x i64> @pclmul256_lo_hi(<4 x i64> %v0, <4 x i64> %v1) {
 ; SSE-LABEL: pclmul256_lo_hi:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
 ; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    movq %rax, %xmm3
+; SSE-NEXT:    movq %rax, %xmm4
 ; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    pclmulqdq $0, %xmm4, %xmm0
-; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm4
-; SSE-NEXT:    movq %xmm4, %rax
-; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm5
+; SSE-NEXT:    pclmulqdq $16, %xmm2, %xmm0
+; SSE-NEXT:    pclmulqdq $1, %xmm4, %xmm2
+; SSE-NEXT:    movq %xmm2, %rax
+; SSE-NEXT:    pclmulqdq $0, %xmm4, %xmm5
 ; SSE-NEXT:    movq %xmm5, %rcx
 ; SSE-NEXT:    xorq %rax, %rcx
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE-NEXT:    movq %xmm4, %rax
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE-NEXT:    movq %xmm2, %rax
 ; SSE-NEXT:    xorq %rcx, %rax
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm4
-; SSE-NEXT:    movq %xmm4, %rcx
-; SSE-NEXT:    pclmulqdq $0, %xmm1, %xmm3
-; SSE-NEXT:    movq %xmm3, %rdx
+; SSE-NEXT:    movdqa %xmm3, %xmm2
+; SSE-NEXT:    pclmulqdq $1, %xmm4, %xmm2
+; SSE-NEXT:    movq %xmm2, %rcx
+; SSE-NEXT:    pclmulqdq $0, %xmm1, %xmm4
+; SSE-NEXT:    movq %xmm4, %rdx
 ; SSE-NEXT:    xorq %rcx, %rdx
-; SSE-NEXT:    pclmulqdq $0, %xmm2, %xmm1
+; SSE-NEXT:    pclmulqdq $16, %xmm3, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
 ; SSE-NEXT:    movq %xmm2, %rcx
 ; SSE-NEXT:    xorq %rdx, %rcx
@@ -219,22 +211,20 @@ define <4 x i64> @pclmul256_lo_hi(<4 x i64> %v0, <4 x i64> %v1) {
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT:    xorl %eax, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm4
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm1, %xmm5
+; AVX-NEXT:    vpclmulqdq $1, %xmm4, %xmm1, %xmm5
 ; AVX-NEXT:    vmovq %xmm5, %rax
 ; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm0, %xmm5
 ; AVX-NEXT:    vmovq %xmm5, %rcx
 ; AVX-NEXT:    xorq %rax, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpclmulqdq $16, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    xorq %rcx, %rax
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm1, %xmm3
-; AVX-NEXT:    vmovq %xmm3, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm2, %xmm3
-; AVX-NEXT:    vmovq %xmm3, %rdx
+; AVX-NEXT:    vpclmulqdq $1, %xmm4, %xmm3, %xmm1
+; AVX-NEXT:    vmovq %xmm1, %rcx
+; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm2, %xmm1
+; AVX-NEXT:    vmovq %xmm1, %rdx
 ; AVX-NEXT:    xorq %rcx, %rdx
-; AVX-NEXT:    vpclmulqdq $0, %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vpclmulqdq $16, %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vpextrq $1, %xmm1, %rcx
 ; AVX-NEXT:    xorq %rdx, %rcx
 ; AVX-NEXT:    vmovq %rcx, %xmm2
@@ -266,49 +256,45 @@ define <4 x i64> @pclmul256_lo_hi(<4 x i64> %v0, <4 x i64> %v1) {
 define <8 x i64> @pclmul512_lo_hi(<8 x i64> %v0, <8 x i64> %v1) {
 ; SSE-LABEL: pclmul512_lo_hi:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm4[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm5[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
 ; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    movq %rax, %xmm5
-; SSE-NEXT:    movdqa %xmm0, %xmm7
-; SSE-NEXT:    pclmulqdq $0, %xmm9, %xmm0
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm9
-; SSE-NEXT:    movq %xmm9, %rax
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm7
-; SSE-NEXT:    movq %xmm7, %rcx
+; SSE-NEXT:    movq %rax, %xmm8
+; SSE-NEXT:    movdqa %xmm0, %xmm9
+; SSE-NEXT:    pclmulqdq $16, %xmm4, %xmm0
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm4
+; SSE-NEXT:    movq %xmm4, %rax
+; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm9
+; SSE-NEXT:    movq %xmm9, %rcx
 ; SSE-NEXT:    xorq %rax, %rcx
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
-; SSE-NEXT:    movq %xmm7, %rax
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; SSE-NEXT:    movq %xmm4, %rax
 ; SSE-NEXT:    xorq %rcx, %rax
-; SSE-NEXT:    movdqa %xmm1, %xmm7
-; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm1
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm8
-; SSE-NEXT:    movq %xmm8, %rcx
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm7
-; SSE-NEXT:    movq %xmm7, %rdx
+; SSE-NEXT:    movdqa %xmm1, %xmm4
+; SSE-NEXT:    pclmulqdq $16, %xmm5, %xmm1
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm5
+; SSE-NEXT:    movq %xmm5, %rcx
+; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
+; SSE-NEXT:    movq %xmm4, %rdx
 ; SSE-NEXT:    xorq %rcx, %rdx
-; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
-; SSE-NEXT:    movq %xmm7, %rcx
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSE-NEXT:    movq %xmm4, %rcx
 ; SSE-NEXT:    xorq %rdx, %rcx
-; SSE-NEXT:    movdqa %xmm2, %xmm7
-; SSE-NEXT:    pclmulqdq $0, %xmm6, %xmm2
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm6
+; SSE-NEXT:    movdqa %xmm2, %xmm4
+; SSE-NEXT:    pclmulqdq $16, %xmm6, %xmm2
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm6
 ; SSE-NEXT:    movq %xmm6, %rdx
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm7
-; SSE-NEXT:    movq %xmm7, %rsi
+; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
+; SSE-NEXT:    movq %xmm4, %rsi
 ; SSE-NEXT:    xorq %rdx, %rsi
-; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[2,3,2,3]
-; SSE-NEXT:    movq %xmm6, %rdx
+; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; SSE-NEXT:    movq %xmm4, %rdx
 ; SSE-NEXT:    xorq %rsi, %rdx
-; SSE-NEXT:    movdqa %xmm4, %xmm6
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm6
-; SSE-NEXT:    movq %xmm6, %rsi
-; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm5
-; SSE-NEXT:    movq %xmm5, %rdi
+; SSE-NEXT:    movdqa %xmm7, %xmm4
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm4
+; SSE-NEXT:    movq %xmm4, %rsi
+; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm8
+; SSE-NEXT:    movq %xmm8, %rdi
 ; SSE-NEXT:    xorq %rsi, %rdi
-; SSE-NEXT:    pclmulqdq $0, %xmm4, %xmm3
+; SSE-NEXT:    pclmulqdq $16, %xmm7, %xmm3
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
 ; SSE-NEXT:    movq %xmm4, %rsi
 ; SSE-NEXT:    xorq %rdi, %rsi
@@ -324,55 +310,51 @@ define <8 x i64> @pclmul512_lo_hi(<8 x i64> %v0, <8 x i64> %v1) {
 ;
 ; AVX-LABEL: pclmul512_lo_hi:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm8
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
 ; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vmovq %rax, %xmm2
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm8, %xmm9
+; AVX-NEXT:    vmovq %rax, %xmm6
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm2, %xmm9
 ; AVX-NEXT:    vmovq %xmm9, %rax
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm0, %xmm9
+; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm0, %xmm9
 ; AVX-NEXT:    vmovq %xmm9, %rcx
 ; AVX-NEXT:    xorq %rax, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm8, %xmm0, %xmm0
+; AVX-NEXT:    vpclmulqdq $16, %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    xorq %rcx, %rax
-; AVX-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm7, %xmm8
-; AVX-NEXT:    vmovq %xmm8, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm6, %xmm8
-; AVX-NEXT:    vmovq %xmm8, %rdx
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm8, %xmm2
+; AVX-NEXT:    vmovq %xmm2, %rcx
+; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm2
+; AVX-NEXT:    vmovq %xmm2, %rdx
 ; AVX-NEXT:    xorq %rcx, %rdx
-; AVX-NEXT:    vpclmulqdq $0, %xmm7, %xmm6, %xmm6
-; AVX-NEXT:    vpextrq $1, %xmm6, %rcx
+; AVX-NEXT:    vpclmulqdq $16, %xmm8, %xmm7, %xmm2
+; AVX-NEXT:    vpextrq $1, %xmm2, %rcx
 ; AVX-NEXT:    xorq %rdx, %rcx
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm7
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm3, %xmm7
 ; AVX-NEXT:    vmovq %xmm7, %rdx
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm1, %xmm7
+; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm1, %xmm7
 ; AVX-NEXT:    vmovq %xmm7, %rsi
 ; AVX-NEXT:    xorq %rdx, %rsi
-; AVX-NEXT:    vpclmulqdq $0, %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpclmulqdq $16, %xmm3, %xmm1, %xmm1
 ; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
 ; AVX-NEXT:    xorq %rsi, %rdx
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm5
-; AVX-NEXT:    vmovq %xmm5, %rsi
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm4, %xmm2
-; AVX-NEXT:    vmovq %xmm2, %rdi
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm5, %xmm3
+; AVX-NEXT:    vmovq %xmm3, %rsi
+; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm4, %xmm3
+; AVX-NEXT:    vmovq %xmm3, %rdi
 ; AVX-NEXT:    xorq %rsi, %rdi
-; AVX-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm2
-; AVX-NEXT:    vpextrq $1, %xmm2, %rsi
+; AVX-NEXT:    vpclmulqdq $16, %xmm5, %xmm4, %xmm3
+; AVX-NEXT:    vpextrq $1, %xmm3, %rsi
 ; AVX-NEXT:    xorq %rdi, %rsi
-; AVX-NEXT:    vmovq %rcx, %xmm3
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm6[0],xmm3[0]
+; AVX-NEXT:    vmovq %rcx, %xmm4
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
 ; AVX-NEXT:    vmovq %rax, %xmm4
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX-NEXT:    vmovq %rsi, %xmm3
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vmovq %rsi, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX-NEXT:    vmovq %rdx, %xmm3
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -418,37 +400,33 @@ define <8 x i64> @pclmul512_lo_hi(<8 x i64> %v0, <8 x i64> %v1) {
 define <8 x i64> @pclmul512_hi_lo(<8 x i64> %v0, <8 x i64> %v1) {
 ; SSE-LABEL: pclmul512_hi_lo:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; SSE-NEXT:    xorl %eax, %eax
 ; SSE-NEXT:    movq %rax, %xmm8
 ; SSE-NEXT:    movdqa %xmm0, %xmm9
-; SSE-NEXT:    pclmulqdq $0, %xmm4, %xmm0
+; SSE-NEXT:    pclmulqdq $1, %xmm4, %xmm0
 ; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
 ; SSE-NEXT:    movq %xmm4, %rax
-; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm9
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm9
 ; SSE-NEXT:    movq %xmm9, %rcx
 ; SSE-NEXT:    xorq %rax, %rcx
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
 ; SSE-NEXT:    movq %xmm4, %rax
 ; SSE-NEXT:    xorq %rcx, %rax
 ; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pclmulqdq $0, %xmm5, %xmm1
+; SSE-NEXT:    pclmulqdq $1, %xmm5, %xmm1
 ; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm5
 ; SSE-NEXT:    movq %xmm5, %rcx
-; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm4
 ; SSE-NEXT:    movq %xmm4, %rdx
 ; SSE-NEXT:    xorq %rcx, %rdx
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
 ; SSE-NEXT:    movq %xmm4, %rcx
 ; SSE-NEXT:    xorq %rdx, %rcx
 ; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pclmulqdq $0, %xmm6, %xmm2
+; SSE-NEXT:    pclmulqdq $1, %xmm6, %xmm2
 ; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm6
 ; SSE-NEXT:    movq %xmm6, %rdx
-; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
+; SSE-NEXT:    pclmulqdq $1, %xmm8, %xmm4
 ; SSE-NEXT:    movq %xmm4, %rsi
 ; SSE-NEXT:    xorq %rdx, %rsi
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
@@ -457,10 +435,10 @@ define <8 x i64> @pclmul512_hi_lo(<8 x i64> %v0, <8 x i64> %v1) {
 ; SSE-NEXT:    movdqa %xmm7, %xmm4
 ; SSE-NEXT:    pclmulqdq $0, %xmm8, %xmm4
 ; SSE-NEXT:    movq %xmm4, %rsi
-; SSE-NEXT:    pclmulqdq $0, %xmm3, %xmm8
+; SSE-NEXT:    pclmulqdq $16, %xmm3, %xmm8
 ; SSE-NEXT:    movq %xmm8, %rdi
 ; SSE-NEXT:    xorq %rsi, %rdi
-; SSE-NEXT:    pclmulqdq $0, %xmm7, %xmm3
+; SSE-NEXT:    pclmulqdq $1, %xmm7, %xmm3
 ; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
 ; SSE-NEXT:    movq %xmm4, %rsi
 ; SSE-NEXT:    xorq %rdi, %rsi
@@ -478,44 +456,40 @@ define <8 x i64> @pclmul512_hi_lo(<8 x i64> %v0, <8 x i64> %v1) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm7
 ; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm8
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX-NEXT:    vextractf128 $1, %ymm3, %xmm5
 ; AVX-NEXT:    xorl %eax, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm6
 ; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm2, %xmm9
 ; AVX-NEXT:    vmovq %xmm9, %rax
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm0, %xmm9
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm0, %xmm9
 ; AVX-NEXT:    vmovq %xmm9, %rcx
 ; AVX-NEXT:    xorq %rax, %rcx
-; AVX-NEXT:    vpclmulqdq $0, %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpclmulqdq $1, %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpextrq $1, %xmm0, %rax
 ; AVX-NEXT:    xorq %rcx, %rax
 ; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm8, %xmm2
 ; AVX-NEXT:    vmovq %xmm2, %rcx
-; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm2, %xmm7
-; AVX-NEXT:    vmovq %xmm7, %rdx
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm7, %xmm2
+; AVX-NEXT:    vmovq %xmm2, %rdx
 ; AVX-NEXT:    xorq %rcx, %rdx
-; AVX-NEXT:    vpclmulqdq $0, %xmm8, %xmm2, %xmm2
+; AVX-NEXT:    vpclmulqdq $1, %xmm8, %xmm7, %xmm2
 ; AVX-NEXT:    vpextrq $1, %xmm2, %rcx
 ; AVX-NEXT:    xorq %rdx, %rcx
 ; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm3, %xmm7
 ; AVX-NEXT:    vmovq %xmm7, %rdx
-; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm1, %xmm7
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm1, %xmm7
 ; AVX-NEXT:    vmovq %xmm7, %rsi
 ; AVX-NEXT:    xorq %rdx, %rsi
-; AVX-NEXT:    vpclmulqdq $0, %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpclmulqdq $1, %xmm3, %xmm1, %xmm1
 ; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
 ; AVX-NEXT:    xorq %rsi, %rdx
-; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm4, %xmm3
+; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm5, %xmm3
 ; AVX-NEXT:    vmovq %xmm3, %rsi
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,3,2,3]
-; AVX-NEXT:    vpclmulqdq $0, %xmm6, %xmm3, %xmm5
-; AVX-NEXT:    vmovq %xmm5, %rdi
+; AVX-NEXT:    vpclmulqdq $1, %xmm6, %xmm4, %xmm3
+; AVX-NEXT:    vmovq %xmm3, %rdi
 ; AVX-NEXT:    xorq %rsi, %rdi
-; AVX-NEXT:    vpclmulqdq $0, %xmm4, %xmm3, %xmm3
+; AVX-NEXT:    vpclmulqdq $1, %xmm5, %xmm4, %xmm3
 ; AVX-NEXT:    vpextrq $1, %xmm3, %rsi
 ; AVX-NEXT:    xorq %rdi, %rsi
 ; AVX-NEXT:    vmovq %rcx, %xmm4
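
Aside, for anyone cross-checking the regenerated immediates in the test diffs above: a scalar model of the PCLMULQDQ quadword selection is enough to see why a [2,3,2,3] pshufd of an operand folds into the $1/$16/$17 immediates. This is an illustrative sketch with made-up helper names, not the codegen itself:

  #include <stdint.h>

  /* Carry-less (GF(2)) multiply of two 64-bit values; the low 64 bits of
     the 128-bit product go to out[0], the high 64 bits to out[1]. */
  static void clmul64(uint64_t a, uint64_t b, uint64_t out[2]) {
    out[0] = out[1] = 0;
    for (int i = 0; i < 64; ++i)
      if ((b >> i) & 1) {
        out[0] ^= a << i;
        if (i)
          out[1] ^= a >> (64 - i);
      }
  }

  /* PCLMULQDQ qword selection: imm bit 0 picks the first operand's qword,
     imm bit 4 picks the second's (index 0 = lo, 1 = hi). Swapping the
     qwords of an operand is therefore the same as flipping its imm bit,
     which is what the (M1 << 4) | M0 recompute in combinePCLMULQDQ does. */
  static void pclmulqdq_model(const uint64_t x[2], const uint64_t y[2],
                              unsigned imm, uint64_t out[2]) {
    clmul64(x[imm & 1], y[(imm >> 4) & 1], out);
  }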


