[llvm-branch-commits] [llvm] ea65b89 - [X86] Fold undef elts to 0 in getTargetVShiftByConstNode.
Tom Stellard via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Jun 16 12:30:42 PDT 2020
Author: Craig Topper
Date: 2020-06-16T12:30:13-07:00
New Revision: ea65b89665c7edcd72ae924d4efad83e79931cd6
URL: https://github.com/llvm/llvm-project/commit/ea65b89665c7edcd72ae924d4efad83e79931cd6
DIFF: https://github.com/llvm/llvm-project/commit/ea65b89665c7edcd72ae924d4efad83e79931cd6.diff
LOG: [X86] Fold undef elts to 0 in getTargetVShiftByConstNode.
Similar to D81212.
Differential Revision: https://reviews.llvm.org/D81292
(cherry picked from commit 3408dcbdf054ac3cc32a97a6a82a3cf5844be609)
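For context: getTargetVShiftByConstNode constant-folds an x86 vector shift-by-immediate when the source operand is a build_vector of constants and undefs. Previously an undef source lane was propagated as undef into the folded constant; since the hardware shifts define every bit of the result (vacated bits are filled with zeros, or with copies of the sign bit for arithmetic shifts), an undef lane is now folded to 0 instead. The following is a minimal standalone sketch of that rule, not LLVM code; the helper name foldPsrliW and its types are invented for illustration.

#include <cstdint>
#include <optional>
#include <vector>

// A lane is either a known 16-bit constant or "undef" (no defined value).
using Lane = std::optional<uint16_t>;

// Sketch of constant-folding a logical right shift by immediate across lanes.
std::vector<uint16_t> foldPsrliW(const std::vector<Lane> &Src, unsigned Amt) {
  std::vector<uint16_t> Out;
  Out.reserve(Src.size());
  for (const Lane &L : Src) {
    // Fold an undef lane to 0: a zero lane trivially has 0s in the shifted-in
    // bit positions, which is what the real instruction guarantees.
    Out.push_back(static_cast<uint16_t>(L.value_or(0) >> Amt));
  }
  return Out;
}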
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vec_shift5.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e360177687b1..1523d56cc4e7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23319,7 +23319,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // Must produce 0s in the correct bits.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
@@ -23331,7 +23332,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // Must produce 0s in the correct bits.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
@@ -23343,7 +23345,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
       for (unsigned i = 0; i != NumElts; ++i) {
         SDValue CurrentOp = SrcOp->getOperand(i);
         if (CurrentOp->isUndef()) {
-          Elts.push_back(CurrentOp);
+          // All shifted in bits must be the same so use 0.
+          Elts.push_back(DAG.getConstant(0, dl, ElementType));
           continue;
         }
         auto *ND = cast<ConstantSDNode>(CurrentOp);
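The third hunk above is the arithmetic-shift (VSRAI) case, hence the slightly different comment: there every shifted-in bit must be a copy of the same bit, and folding the undef lane to 0 satisfies that too, because sign-extending a zero lane still produces only zero bits. A one-line illustrative check of that claim (not part of the patch):

#include <cstdint>

// Arithmetic right shift of a zero lane yields 0, so 0 is a valid stand-in
// for an undef lane in the sign-extending case as well.
static_assert((int16_t{0} >> 3) == 0, "arithmetic shift of 0 is all zero bits");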
diff --git a/llvm/test/CodeGen/X86/vec_shift5.ll b/llvm/test/CodeGen/X86/vec_shift5.ll
index 5c84d7c748f0..eef51e3ed365 100644
--- a/llvm/test/CodeGen/X86/vec_shift5.ll
+++ b/llvm/test/CodeGen/X86/vec_shift5.ll
@@ -121,12 +121,12 @@ define <2 x i64> @test8() {
define <8 x i16> @test9() {
; X32-LABEL: test9:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X32-NEXT: retl
;
; X64-LABEL: test9:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
ret <8 x i16> %1
@@ -135,12 +135,12 @@ define <8 x i16> @test9() {
define <4 x i32> @test10() {
; X32-LABEL: test10:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,0,4]
; X32-NEXT: retl
;
; X64-LABEL: test10:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,0,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
ret <4 x i32> %1
@@ -154,7 +154,7 @@ define <2 x i64> @test11() {
;
; X64-LABEL: test11:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <u,3>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
ret <2 x i64> %1
@@ -163,12 +163,12 @@ define <2 x i64> @test11() {
define <8 x i16> @test12() {
; X32-LABEL: test12:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X32-NEXT: retl
;
; X64-LABEL: test12:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
ret <8 x i16> %1
@@ -177,12 +177,12 @@ define <8 x i16> @test12() {
define <4 x i32> @test13() {
; X32-LABEL: test13:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [0,1,0,4]
; X32-NEXT: retl
;
; X64-LABEL: test13:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <u,1,u,4>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,1,0,4]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
ret <4 x i32> %1
@@ -191,12 +191,12 @@ define <4 x i32> @test13() {
define <8 x i16> @test14() {
; X32-LABEL: test14:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X32-NEXT: retl
;
; X64-LABEL: test14:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <1,1,u,u,3,u,8,16>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [1,1,0,0,3,0,8,16]
; X64-NEXT: retq
%1 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> <i16 15, i16 8, i16 undef, i16 undef, i16 31, i16 undef, i16 64, i16 128>, i32 3)
ret <8 x i16> %1
@@ -205,12 +205,12 @@ define <8 x i16> @test14() {
define <4 x i32> @test15() {
; X32-LABEL: test15:
; X32: # %bb.0:
-; X32-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
+; X32-NEXT: movaps {{.*#+}} xmm0 = [0,64,0,256]
; X32-NEXT: retl
;
; X64-LABEL: test15:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <u,64,u,256>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,64,0,256]
; X64-NEXT: retq
%1 = tail call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> <i32 undef, i32 8, i32 undef, i32 32>, i32 3)
ret <4 x i32> %1
@@ -224,7 +224,7 @@ define <2 x i64> @test16() {
;
; X64-LABEL: test16:
; X64: # %bb.0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = <u,248>
+; X64-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,248,0,0,0,0,0,0,0]
; X64-NEXT: retq
%1 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> <i64 undef, i64 31>, i32 3)
ret <2 x i64> %1
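As a sanity check on the updated CHECK lines, the new constants follow directly from shifting each defined lane and zeroing the undef ones. A throwaway program along these lines (purely illustrative, not part of the test suite) reproduces the [1,1,0,0,3,0,8,16] vector expected by test9/test12 above:

#include <cstdint>
#include <cstdio>
#include <optional>

int main() {
  // Source of @test9: <15, 8, undef, undef, 31, undef, 64, 128>, shifted
  // right by 3; an empty optional models an undef lane.
  std::optional<int16_t> Src[8] = {15, 8, {}, {}, 31, {}, 64, 128};
  for (const auto &L : Src)
    std::printf("%d ", L.value_or(0) >> 3); // prints: 1 1 0 0 3 0 8 16
  std::printf("\n");
  return 0;
}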