[llvm] [AArch64][GlobalISel] Prefer to use Vector Truncate (PR #105692)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 28 06:50:24 PDT 2024
https://github.com/chuongg3 updated https://github.com/llvm/llvm-project/pull/105692
>From 5202280c51d2f2f1bda1bd42a7402e8a7f968360 Mon Sep 17 00:00:00 2001
From: Tuan Chuong Goh <chuong.goh at arm.com>
Date: Wed, 21 Aug 2024 09:23:38 +0000
Subject: [PATCH] [AArch64][GlobalISel] Prefer to use Vector Truncate
Tries to combine scalarised truncates into a vector truncate
operation and prefers to use G_CONCAT_VECTORS instructions
over G_BUILD_VECTOR instructions
EXAMPLE:
%a(i32), %b(i32) = G_UNMERGE_VALUES %src(<2 x i32>)
%T_a(i16) = G_TRUNC %a(i32)
%T_b(i16) = G_TRUNC %b(i32)
%Undef(i16) = G_IMPLICIT_DEF(i16)
%dst(v4i16) = G_BUILD_VECTOR %T_a(i16), %T_b(i16), %Undef(i16), %Undef(i16)
===>
%Undef(<2 x i32>) = G_IMPLICIT_DEF(<2 x i32>)
%Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x i32>), %Undef(<2 x i32>)
%dst(<4 x s16>) = G_TRUNC %Mid(<4 x s32>)
This will ALSO match:
%a(i32), %b(i32) = G_UNMERGE_VALUES %src(<2 x i32>)
%Undef(i32) = G_IMPLICIT_DEF(i32)
%dst(v4i32) = G_BUILD_VECTOR %a(i32), %b(i32), %Undef(i32), %Undef(i32)
===>
%Undef(<2 x i32>) = G_IMPLICIT_DEF(<2 x i32>)
%Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x i32>), %Undef(<2 x i32>)
---
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 3 +
.../include/llvm/Target/GlobalISel/Combine.td | 10 +-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 114 ++++++++++++++++++
llvm/lib/Target/AArch64/AArch64Combine.td | 2 +-
.../AArch64/GISel/AArch64LegalizerInfo.cpp | 3 +-
.../AArch64/GlobalISel/legalize-freeze.mir | 24 +---
.../GlobalISel/legalize-insert-vector-elt.mir | 11 +-
llvm/test/CodeGen/AArch64/bswap.ll | 4 +-
llvm/test/CodeGen/AArch64/concat-vector.ll | 19 ++-
.../AArch64/fixed-vector-interleave.ll | 20 +--
llvm/test/CodeGen/AArch64/fptoi.ll | 52 ++------
llvm/test/CodeGen/AArch64/itofp.ll | 11 +-
llvm/test/CodeGen/AArch64/shift.ll | 72 ++---------
llvm/test/CodeGen/AArch64/shufflevector.ll | 32 ++---
llvm/test/CodeGen/AArch64/xtn.ll | 17 +--
15 files changed, 190 insertions(+), 204 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9b62d6067be39c..9eb72eb39bfb4f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -599,6 +599,9 @@ class CombinerHelper {
bool matchRotateOutOfRange(MachineInstr &MI);
void applyRotateOutOfRange(MachineInstr &MI);
+ bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
+ void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
+
/// \returns true if a G_ICMP instruction \p MI can be replaced with a true
/// or false constant based off of KnownBits information.
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 525cc815e73cef..6187cedaba6c57 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1602,6 +1602,14 @@ def insert_vector_elt_oob : GICombineRule<
[{ return Helper.matchInsertVectorElementOOB(*${root}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+// Combine v8i8 (buildvector i8 (trunc(unmerge)), i8 (trunc), i8 (trunc), i8 (trunc), undef, undef, undef, undef)
+def combine_use_vector_truncate : GICombineRule<
+ (defs root:$root, register_matchinfo:$matchinfo),
+ // (match (wip_match_opcode G_BUILD_VECTOR):$root,
+ (match (G_BUILD_VECTOR $dst, GIVariadic<>:$unused):$root,
+ [{ return Helper.matchUseVectorTruncate(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyUseVectorTruncate(*${root}, ${matchinfo}); }])>;
+
def add_of_vscale : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
(match (G_VSCALE $left, $imm1),
@@ -1967,7 +1975,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
combine_concat_vector, double_icmp_zero_and_or_combine, match_addos,
- sext_trunc, zext_trunc, prefer_sign_combines, combine_shuffle_concat]>;
+ sext_trunc, zext_trunc, prefer_sign_combines, combine_shuffle_concat, combine_use_vector_truncate]>;
// A combine group used to for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 1517ae707c8cff..bc8f1ccc88fd0a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3313,6 +3313,120 @@ static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
isConstTrueVal(TLI, Cst, IsVector, IsFP);
}
+// This combine tries to reduce the number of scalarised G_TRUNC instructions by
+// using vector truncates instead
+//
+// EXAMPLE:
+// %a(i32), %b(i32) = G_UNMERGE_VALUES %src(<2 x i32>)
+// %T_a(i16) = G_TRUNC %a(i32)
+// %T_b(i16) = G_TRUNC %b(i32)
+// %Undef(i16) = G_IMPLICIT_DEF(i16)
+// %dst(v4i16) = G_BUILD_VECTOR %T_a(i16), %T_b(i16), %Undef(i16), %Undef(i16)
+//
+// ===>
+// %Undef(<2 x i32>) = G_IMPLICIT_DEF(<2 x i32>)
+// %Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x i32>), %Undef(<2 x i32>)
+// %dst(<4 x s16>) = G_TRUNC %Mid(<4 x s32>)
+//
+// This will ALSO match:
+// %a(i32), %b(i32) = G_UNMERGE_VALUES %src(<2 x i32>)
+// %Undef(i32) = G_IMPLICIT_DEF(i32)
+// %dst(v4i32) = G_BUILD_VECTOR %a(i32), %b(i32), %Undef(i32), %Undef(i32)
+//
+// ===>
+// %Undef(<2 x i32>) = G_IMPLICIT_DEF(<2 x i32>)
+// %Mid(<4 x s32>) = G_CONCAT_VECTORS %src(<2 x i32>), %Undef(<2 x i32>)
+//
+bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
+ Register &MatchInfo) {
+ assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
+ "Expected G_BUILD_VECTOR instruction\n");
+
+ unsigned NumOperands = MI.getNumOperands();
+ LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+
+ // Check the G_BUILD_VECTOR sources
+ SmallVector<MachineInstr *> TruncMIs;
+ for (unsigned i = 1; i < NumOperands; ++i) {
+ auto SrcMI = MRI.getVRegDef(MI.getOperand(i).getReg());
+ auto SrcMIOpc = SrcMI->getOpcode();
+
+ if (SrcMIOpc == TargetOpcode::G_TRUNC)
+ TruncMIs.push_back(SrcMI);
+ else if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
+ return false;
+ }
+
+ if (TruncMIs.size() < 2)
+ return false;
+
+ // Check if the Trunc instructions all come from the same MI
+ auto UnmergeMI = MRI.getVRegDef(TruncMIs[0]->getOperand(1).getReg());
+ if (UnmergeMI->getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+ return false;
+
+ for (auto TruncMI : TruncMIs) {
+ auto SrcMI = MRI.getVRegDef(TruncMI->getOperand(1).getReg());
+ if (!UnmergeMI->isIdenticalTo(*SrcMI))
+ return false;
+ }
+
+ // Check the size of unmerge source
+ unsigned numOps = UnmergeMI->getNumOperands();
+ MatchInfo = UnmergeMI->getOperand(numOps - 1).getReg();
+ LLT UnmergeSrcTy = MRI.getType(MatchInfo);
+ unsigned DstTyNumElts = DstTy.getNumElements();
+ unsigned UnmergeSrcTyNumElts = UnmergeSrcTy.getNumElements();
+ if (DstTyNumElts % UnmergeSrcTyNumElts != 0)
+ return false;
+
+ // If post legalizer, ensure generated instructions are legal
+ if (!IsPreLegalize) {
+ LLT MidTy = DstTy.changeElementSize(UnmergeSrcTy.getScalarSizeInBits());
+
+ if (DstTyNumElts != UnmergeSrcTyNumElts &&
+ !isLegal({TargetOpcode::G_CONCAT_VECTORS, {MidTy, UnmergeSrcTy}}))
+ return false;
+
+ if (!isLegal({TargetOpcode::G_TRUNC, {DstTy, MidTy}}))
+ return false;
+ }
+
+ return true;
+}
+
+void CombinerHelper::applyUseVectorTruncate(MachineInstr &MI,
+ Register &MatchInfo) {
+ assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
+ "Expected G_BUILD_VECTOR instruction\n");
+
+ Register MidReg;
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ LLT UnmergeSrcTy = MRI.getType(MatchInfo);
+ unsigned DstTyNumElt = DstTy.getNumElements();
+ unsigned UnmergeSrcTyNumElt = UnmergeSrcTy.getNumElements();
+
+ // No need to pad vector if only G_TRUNC is needed
+ if (DstTyNumElt / UnmergeSrcTyNumElt == 1) {
+ MidReg = MatchInfo;
+ } else {
+ Register UndefReg = Builder.buildUndef(UnmergeSrcTy).getReg(0);
+ SmallVector<Register> ConcatRegs = {MatchInfo};
+ for (unsigned i = 1; i < DstTyNumElt / UnmergeSrcTyNumElt; ++i)
+ ConcatRegs.push_back(UndefReg);
+
+ MidReg = Builder
+ .buildConcatVectors(DstTy.changeElementSize(
+ UnmergeSrcTy.getScalarSizeInBits()),
+ ConcatRegs)
+ .getReg(0);
+ }
+
+ Builder.buildTrunc(DstReg, MidReg);
+ MI.eraseFromParent();
+}
+
bool CombinerHelper::matchNotCmp(MachineInstr &MI,
SmallVectorImpl<Register> &RegsToNegate) {
assert(MI.getOpcode() == TargetOpcode::G_XOR);
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 3f717c8a60050f..d479b66c4bb533 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -322,5 +322,5 @@ def AArch64PostLegalizerCombiner
select_to_minmax, or_to_bsp, combine_concat_vector,
commute_constant_to_rhs,
push_freeze_to_prevent_poison_from_propagating,
- combine_mul_cmlt]> {
+ combine_mul_cmlt, combine_use_vector_truncate]> {
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 7eaf6a84bd204f..2f78ee7b57d27d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -95,7 +95,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder(
{G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
.legalFor({p0, s8, s16, s32, s64})
- .legalFor(PackedVectorAllTypeList)
+ .legalFor({v16s8, v8s16, v4s32, v2s64, v2p0, v8s8, v4s16, v2s32, v4s8,
+ v2s16, v2s8})
.widenScalarToNextPow2(0)
.clampScalar(0, s8, s64)
.moreElementsToNextPow2(0)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
index 3e768c4d7a267c..03c28efe7e09fb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
@@ -159,25 +159,13 @@ body: |
; CHECK-LABEL: name: test_freeze_v3s8
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(<4 x s16>), [[UV5:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
- ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<4 x s16>) = G_FREEZE [[UV4]]
- ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[FREEZE]](<4 x s16>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<4 x s8>) = G_FREEZE [[DEF]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[FREEZE]](<4 x s8>)
; CHECK-NEXT: %undef:_(s32) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK-NEXT: %ext0:_(s32) = G_AND [[ANYEXT1]], [[C]]
- ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
- ; CHECK-NEXT: %ext1:_(s32) = G_AND [[ANYEXT2]], [[C]]
- ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
- ; CHECK-NEXT: %ext2:_(s32) = G_AND [[ANYEXT3]], [[C]]
+ ; CHECK-NEXT: %ext0:_(s32) = G_ZEXT [[UV]](s8)
+ ; CHECK-NEXT: %ext1:_(s32) = G_ZEXT [[UV1]](s8)
+ ; CHECK-NEXT: %ext2:_(s32) = G_ZEXT [[UV2]](s8)
; CHECK-NEXT: %res:_(<4 x s32>) = G_BUILD_VECTOR %ext0(s32), %ext1(s32), %ext2(s32), %undef(s32)
; CHECK-NEXT: $q0 = COPY %res(<4 x s32>)
%x:_(<3 x s8>) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index 9a8697c1d9b866..11c6c7fb40faa1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -248,13 +248,10 @@ body: |
; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(s8) = G_TRUNC [[UV6]](s16)
- ; CHECK-NEXT: [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[UV7]](s16)
- ; CHECK-NEXT: [[TRUNC8:%[0-9]+]]:_(s8) = G_TRUNC [[UV8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8), [[TRUNC8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(<4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[DEF2]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[UV6]](s8), [[UV7]](s8), [[UV8]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<16 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 16, 16, 16, 1, 16, 16, 16, 2, 16, 16, 16, undef, undef, undef, undef)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[SHUF]](<16 x s8>)
; CHECK-NEXT: [[UITOFP:%[0-9]+]]:_(<4 x s32>) = G_UITOFP [[BITCAST]](<4 x s32>)
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index 071613b9cc011e..0292414ddb5c0e 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -109,9 +109,7 @@ define <2 x i16> @bswap_v2i16(<2 x i16> %a){
;
; CHECK-GI-LABEL: bswap_v2i16:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: rev16 v0.8b, v0.8b
; CHECK-GI-NEXT: mov h1, v0.h[1]
; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index f6eeeef4faf7ed..636f14ceed8d45 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -181,13 +181,10 @@ define <8 x i16> @concat_v8s16_v2s16(ptr %ptr) {
;
; CHECK-GI-LABEL: concat_v8s16_v2s16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: dup v0.4s, w8
-; CHECK-GI-NEXT: ldr h1, [x0]
-; CHECK-GI-NEXT: ldr h2, [x0, #2]
-; CHECK-GI-NEXT: mov v1.s[1], v2.s[0]
-; CHECK-GI-NEXT: xtn v2.4h, v0.4s
-; CHECK-GI-NEXT: xtn v0.4h, v1.4s
-; CHECK-GI-NEXT: fmov w8, s2
+; CHECK-GI-NEXT: ldr h0, [x0]
+; CHECK-GI-NEXT: ldr h1, [x0, #2]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NEXT: mov v0.s[1], w8
; CHECK-GI-NEXT: mov v0.s[2], w8
; CHECK-GI-NEXT: mov v0.s[3], w8
@@ -205,12 +202,10 @@ define <16 x i8> @concat_v16s8_v4s8(ptr %ptr) {
;
; CHECK-GI-LABEL: concat_v16s8_v4s8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: dup v0.8h, w8
-; CHECK-GI-NEXT: xtn v1.8b, v0.8h
; CHECK-GI-NEXT: ldr s0, [x0]
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
-; CHECK-GI-NEXT: mov v0.s[2], v1.s[0]
-; CHECK-GI-NEXT: mov v0.s[3], v1.s[0]
+; CHECK-GI-NEXT: mov v0.s[1], v0.s[0]
+; CHECK-GI-NEXT: mov v0.s[2], v0.s[0]
+; CHECK-GI-NEXT: mov v0.s[3], v0.s[0]
; CHECK-GI-NEXT: ret
%a = load <4 x i8>, ptr %ptr
%b = shufflevector <4 x i8> %a, <4 x i8> %a, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index 2ea7e0f3c44a9a..a9618fdc2dec30 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -3,22 +3,10 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -global-isel %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <4 x half> @interleave2_v4f16(<2 x half> %vec0, <2 x half> %vec1) {
-; CHECK-SD-LABEL: interleave2_v4f16:
-; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: zip1 v0.4h, v0.4h, v1.4h
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: interleave2_v4f16:
-; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: dup v2.4s, w8
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: xtn v2.4h, v2.4s
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: mov v0.s[1], w8
-; CHECK-GI-NEXT: mov v1.s[1], w8
-; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: interleave2_v4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT: ret
%retval = call <4 x half> @llvm.vector.interleave2.v4f16(<2 x half> %vec0, <2 x half> %vec1)
ret <4 x half> %retval
}
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 0c880592d955b7..78990aecfcce83 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -3172,42 +3172,22 @@ entry:
}
define <3 x i16> @fptos_v3f32_v3i16(<3 x float> %a) {
-; CHECK-SD-LABEL: fptos_v3f32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptos_v3f32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-GI-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NEXT: mov s2, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptos_v3f32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptosi <3 x float> %a to <3 x i16>
ret <3 x i16> %c
}
define <3 x i16> @fptou_v3f32_v3i16(<3 x float> %a) {
-; CHECK-SD-LABEL: fptou_v3f32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: fptou_v3f32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-GI-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NEXT: mov s2, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: fptou_v3f32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu v0.4s, v0.4s
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%c = fptoui <3 x float> %a to <3 x i16>
ret <3 x i16> %c
@@ -6077,11 +6057,7 @@ define <3 x i16> @fptos_v3f16_v3i16(<3 x half> %a) {
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fcvtl v0.4s, v0.4h
; CHECK-GI-NOFP16-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NOFP16-NEXT: mov s2, v0.s[2]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
;
; CHECK-GI-FP16-LABEL: fptos_v3f16_v3i16:
@@ -6110,11 +6086,7 @@ define <3 x i16> @fptou_v3f16_v3i16(<3 x half> %a) {
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fcvtl v0.4s, v0.4h
; CHECK-GI-NOFP16-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NOFP16-NEXT: mov s2, v0.s[2]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-GI-NOFP16-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NOFP16-NEXT: xtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
;
; CHECK-GI-FP16-LABEL: fptou_v3f16_v3i16:
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index 7a4c5cee27b805..bb2b91dd38a339 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -7458,9 +7458,7 @@ define <2 x half> @stofp_v2i16_v2f16(<2 x i16> %a) {
;
; CHECK-GI-FP16-LABEL: stofp_v2i16_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -7501,9 +7499,7 @@ define <2 x half> @utofp_v2i16_v2f16(<2 x i16> %a) {
;
; CHECK-GI-FP16-LABEL: utofp_v2i16_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
@@ -8067,8 +8063,7 @@ define <2 x half> @utofp_v2i8_v2f16(<2 x i8> %a) {
; CHECK-GI-FP16-NEXT: movi d1, #0x0000ff000000ff
; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
diff --git a/llvm/test/CodeGen/AArch64/shift.ll b/llvm/test/CodeGen/AArch64/shift.ll
index 9c8d3e0f07de87..9c2c8021711ce4 100644
--- a/llvm/test/CodeGen/AArch64/shift.ll
+++ b/llvm/test/CodeGen/AArch64/shift.ll
@@ -531,20 +531,8 @@ define <4 x i8> @shl_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: shl_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v0.h[1]
-; CHECK-GI-NEXT: mov h3, v1.h[1]
-; CHECK-GI-NEXT: mov h4, v0.h[2]
-; CHECK-GI-NEXT: mov h5, v0.h[3]
-; CHECK-GI-NEXT: mov h6, v1.h[3]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov h2, v1.h[2]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov v0.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov v1.b[2], v2.b[0]
-; CHECK-GI-NEXT: mov v0.b[3], v5.b[0]
-; CHECK-GI-NEXT: mov v1.b[3], v6.b[0]
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
; CHECK-GI-NEXT: mov b2, v0.b[2]
@@ -585,12 +573,8 @@ define <2 x i16> @shl_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: shl_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov s2, v0.s[1]
-; CHECK-GI-NEXT: mov s3, v1.s[1]
-; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
@@ -679,20 +663,8 @@ define <4 x i8> @ashr_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: ashr_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v1.h[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov h3, v0.h[1]
-; CHECK-GI-NEXT: mov h4, v1.h[2]
-; CHECK-GI-NEXT: mov h5, v1.h[3]
-; CHECK-GI-NEXT: mov h6, v0.h[3]
-; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov h2, v0.h[2]
-; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
-; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: neg v1.8b, v1.8b
; CHECK-GI-NEXT: sshl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
@@ -733,12 +705,8 @@ define <2 x i16> @ashr_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: ashr_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov s2, v1.s[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov s3, v0.s[1]
-; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: neg v1.4h, v1.4h
; CHECK-GI-NEXT: sshl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
@@ -816,20 +784,8 @@ define <4 x i8> @lshr_v4i8(<4 x i8> %0, <4 x i8> %1){
;
; CHECK-GI-LABEL: lshr_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v1.h[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov h3, v0.h[1]
-; CHECK-GI-NEXT: mov h4, v1.h[2]
-; CHECK-GI-NEXT: mov h5, v1.h[3]
-; CHECK-GI-NEXT: mov h6, v0.h[3]
-; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov h2, v0.h[2]
-; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
-; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
-; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: neg v1.8b, v1.8b
; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
; CHECK-GI-NEXT: mov b1, v0.b[1]
@@ -869,12 +825,8 @@ define <2 x i16> @lshr_v2i16(<2 x i16> %0, <2 x i16> %1){
;
; CHECK-GI-LABEL: lshr_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov s2, v1.s[1]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: mov s3, v0.s[1]
-; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: neg v1.4h, v1.4h
; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
; CHECK-GI-NEXT: mov h1, v0.h[1]
diff --git a/llvm/test/CodeGen/AArch64/shufflevector.ll b/llvm/test/CodeGen/AArch64/shufflevector.ll
index b1131f287fe9a9..8e77f94db5069d 100644
--- a/llvm/test/CodeGen/AArch64/shufflevector.ll
+++ b/llvm/test/CodeGen/AArch64/shufflevector.ll
@@ -209,21 +209,9 @@ define i32 @shufflevector_v4i8(<4 x i8> %a, <4 x i8> %b){
;
; CHECK-GI-LABEL: shufflevector_v4i8:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov h2, v0.h[1]
-; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
; CHECK-GI-NEXT: adrp x8, .LCPI15_0
-; CHECK-GI-NEXT: mov h4, v0.h[2]
-; CHECK-GI-NEXT: mov h5, v0.h[3]
-; CHECK-GI-NEXT: mov h6, v1.h[3]
-; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
-; CHECK-GI-NEXT: mov h2, v1.h[2]
-; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
-; CHECK-GI-NEXT: mov v0.b[2], v4.b[0]
-; CHECK-GI-NEXT: mov v1.b[2], v2.b[0]
-; CHECK-GI-NEXT: mov v0.b[3], v5.b[0]
-; CHECK-GI-NEXT: mov v1.b[3], v6.b[0]
+; CHECK-GI-NEXT: uzp1 v1.8b, v1.8b, v0.8b
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI15_0]
; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
@@ -278,13 +266,9 @@ define i32 @shufflevector_v2i16(<2 x i16> %a, <2 x i16> %b){
;
; CHECK-GI-LABEL: shufflevector_v2i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-GI-NEXT: mov s2, v0.s[1]
-; CHECK-GI-NEXT: mov s3, v1.s[1]
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-GI-NEXT: adrp x8, .LCPI17_0
-; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
-; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: uzp1 v1.4h, v1.4h, v0.4h
; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI17_0]
; CHECK-GI-NEXT: tbl v0.16b, { v0.16b }, v1.16b
@@ -397,8 +381,8 @@ define i32 @shufflevector_v4i8_zeroes(<4 x i8> %a, <4 x i8> %b){
;
; CHECK-GI-LABEL: shufflevector_v4i8_zeroes:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: dup v0.8b, w8
+; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-GI-NEXT: dup v0.8b, v0.b[0]
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
%c = shufflevector <4 x i8> %a, <4 x i8> %b, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -433,8 +417,8 @@ define i32 @shufflevector_v2i16_zeroes(<2 x i16> %a, <2 x i16> %b){
;
; CHECK-GI-LABEL: shufflevector_v2i16_zeroes:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: dup v0.4h, w8
+; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: dup v0.4h, v0.h[0]
; CHECK-GI-NEXT: fmov w0, s0
; CHECK-GI-NEXT: ret
%c = shufflevector <2 x i16> %a, <2 x i16> %b, <2 x i32> <i32 0, i32 0>
diff --git a/llvm/test/CodeGen/AArch64/xtn.ll b/llvm/test/CodeGen/AArch64/xtn.ll
index e536ba240453e2..f5ef81059d9475 100644
--- a/llvm/test/CodeGen/AArch64/xtn.ll
+++ b/llvm/test/CodeGen/AArch64/xtn.ll
@@ -275,19 +275,10 @@ entry:
}
define <3 x i16> @xtn_v3i32_v3i16(<3 x i32> %a) {
-; CHECK-SD-LABEL: xtn_v3i32_v3i16:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: xtn v0.4h, v0.4s
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: xtn_v3i32_v3i16:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov s1, v0.s[1]
-; CHECK-GI-NEXT: mov s2, v0.s[2]
-; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
-; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: xtn_v3i32_v3i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: xtn v0.4h, v0.4s
+; CHECK-NEXT: ret
entry:
%arg1 = trunc <3 x i32> %a to <3 x i16>
ret <3 x i16> %arg1
More information about the llvm-commits
mailing list