[llvm] r350999 - [X86] Improve vXi64 ISD::ABS codegen with SSE41+
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Jan 12 02:28:12 PST 2019
Author: rksimon
Date: Sat Jan 12 02:28:12 2019
New Revision: 350999
URL: http://llvm.org/viewvc/llvm-project?rev=350999&view=rev
Log:
[X86] Improve vXi64 ISD::ABS codegen with SSE41+
Make use of vblendvpd to select between X and 0-X on the sign bit
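The trick is that (v)blendvpd selects each 64-bit lane from its second
source when the sign bit of the corresponding mask lane is set, so
passing X itself as the mask picks 0-X exactly in the negative lanes.
A minimal SSE4.1 intrinsics sketch of the same idea (the helper name
abs_v2i64 is illustrative, not part of this patch):

    #include <immintrin.h>

    // abs(v2i64), mirroring the new pxor/psubq/blendvpd sequence.
    __m128i abs_v2i64(__m128i x) {
      __m128i neg = _mm_sub_epi64(_mm_setzero_si128(), x); // 0 - x
      // blendvpd takes 'neg' in lanes where the sign bit of 'x' is set.
      return _mm_castpd_si128(_mm_blendv_pd(
          _mm_castsi128_pd(x), _mm_castsi128_pd(neg), _mm_castsi128_pd(x)));
    }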
Differential Revision: https://reviews.llvm.org/D56544
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/combine-abs.ll
llvm/trunk/test/CodeGen/X86/viabs.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=350999&r1=350998&r2=350999&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Jan 12 02:28:12 2019
@@ -23602,6 +23602,15 @@ static SDValue LowerABS(SDValue Op, cons
return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
}
+ // ABS(vXi64 X) --> BLENDVPD(X, 0-X, X), selecting on the sign bit of X.
+ if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
+ SDLoc DL(Op);
+ SDValue Src = Op.getOperand(0);
+ SDValue Sub =
+ DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
+ return DAG.getNode(X86ISD::SHRUNKBLEND, DL, VT, Src, Sub, Src);
+ }
+
if (VT.is256BitVector() && !Subtarget.hasInt256()) {
assert(VT.isInteger() &&
"Only handle AVX 256-bit vector integer operation");
Modified: llvm/trunk/test/CodeGen/X86/combine-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-abs.ll?rev=350999&r1=350998&r2=350999&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-abs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-abs.ll Sat Jan 12 02:28:12 2019
@@ -67,9 +67,8 @@ define <4 x i64> @combine_v4i64_abs_abs(
; AVX2-LABEL: combine_v4i64_abs_abs:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
+; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
Modified: llvm/trunk/test/CodeGen/X86/viabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/viabs.ll?rev=350999&r1=350998&r2=350999&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/viabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/viabs.ll Sat Jan 12 02:28:12 2019
@@ -528,29 +528,43 @@ define <16 x i32> @test_abs_le_16i32(<16
}
define <2 x i64> @test_abs_ge_v2i64(<2 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_ge_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: paddq %xmm1, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: test_abs_ge_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test_abs_ge_v2i64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: psrad $31, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: test_abs_ge_v2i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psubq %xmm0, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_abs_ge_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_ge_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
-; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_ge_v2i64:
@@ -564,39 +578,62 @@ define <2 x i64> @test_abs_ge_v2i64(<2 x
}
define <4 x i64> @test_abs_gt_v4i64(<4 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_gt_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: paddq %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: paddq %xmm2, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: test_abs_gt_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: paddq %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test_abs_gt_v4i64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: psrad $31, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm2, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: psrad $31, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: test_abs_gt_v4i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: psubq %xmm0, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: psubq %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: movapd %xmm2, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_abs_gt_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_gt_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm1
+; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_gt_v4i64:
@@ -610,60 +647,97 @@ define <4 x i64> @test_abs_gt_v4i64(<4 x
}
define <8 x i64> @test_abs_le_v8i64(<8 x i64> %a) nounwind {
-; SSE-LABEL: test_abs_le_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: pxor %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm1
-; SSE-NEXT: pxor %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm2
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm3
-; SSE-NEXT: retq
+; SSE2-LABEL: test_abs_le_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test_abs_le_v8i64:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: test_abs_le_v8i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm0, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm4
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm1
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm2
+; SSE41-NEXT: psubq %xmm3, %xmm5
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm3
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm4
-; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
-; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v8i64:
@@ -677,55 +751,103 @@ define <8 x i64> @test_abs_le_v8i64(<8 x
}
define <8 x i64> @test_abs_le_v8i64_fold(<8 x i64>* %a.ptr) nounwind {
-; SSE-LABEL: test_abs_le_v8i64_fold:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqu (%rdi), %xmm0
-; SSE-NEXT: movdqu 16(%rdi), %xmm1
-; SSE-NEXT: movdqu 32(%rdi), %xmm2
-; SSE-NEXT: movdqu 48(%rdi), %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: pxor %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm1
-; SSE-NEXT: pxor %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm2
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: paddq %xmm4, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm3
-; SSE-NEXT: retq
+; SSE2-LABEL: test_abs_le_v8i64_fold:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqu (%rdi), %xmm0
+; SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; SSE2-NEXT: movdqu 48(%rdi), %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: paddq %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: test_abs_le_v8i64_fold:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqu (%rdi), %xmm0
+; SSSE3-NEXT: movdqu 16(%rdi), %xmm1
+; SSSE3-NEXT: movdqu 32(%rdi), %xmm2
+; SSSE3-NEXT: movdqu 48(%rdi), %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: paddq %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: test_abs_le_v8i64_fold:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqu (%rdi), %xmm1
+; SSE41-NEXT: movdqu 16(%rdi), %xmm2
+; SSE41-NEXT: movdqu 32(%rdi), %xmm3
+; SSE41-NEXT: movdqu 48(%rdi), %xmm4
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm1
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm2
+; SSE41-NEXT: pxor %xmm6, %xmm6
+; SSE41-NEXT: psubq %xmm3, %xmm6
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm3
+; SSE41-NEXT: psubq %xmm4, %xmm5
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm1
+; SSE41-NEXT: movapd %xmm3, %xmm2
+; SSE41-NEXT: movapd %xmm4, %xmm3
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_abs_le_v8i64_fold:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqu (%rdi), %xmm0
-; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
-; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
-; AVX1-NEXT: vmovdqu 48(%rdi), %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmovupd (%rdi), %ymm0
+; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq 16(%rdi), %xmm2, %xmm3
+; AVX1-NEXT: vpsubq (%rdi), %xmm2, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vpsubq 48(%rdi), %xmm2, %xmm3
+; AVX1-NEXT: vpsubq 32(%rdi), %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_abs_le_v8i64_fold:
@@ -733,12 +855,10 @@ define <8 x i64> @test_abs_le_v8i64_fold
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
-; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_abs_le_v8i64_fold:
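The same pattern scales to 256-bit vectors, matching the
vpxor/vpsubq/vblendvpd sequences checked above. A hedged AVX2 sketch
(the helper name abs_v4i64 is illustrative):

    #include <immintrin.h>

    // abs(v4i64), mirroring the new vpsubq + vblendvpd codegen.
    __m256i abs_v4i64(__m256i x) {
      __m256i neg = _mm256_sub_epi64(_mm256_setzero_si256(), x); // 0 - x
      return _mm256_castpd_si256(_mm256_blendv_pd(
          _mm256_castsi256_pd(x), _mm256_castsi256_pd(neg),
          _mm256_castsi256_pd(x)));
    }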