[llvm] add9079 - [LoongArch] Broadcast repeated subsequence in build_vector instead of inserting per element (#154533)
Author: ZhaoQi
Date: 2025-09-22T16:04:39+08:00
New Revision: add9079dcf0f0a34e03e5453e754d8fc40116785
URL: https://github.com/llvm/llvm-project/commit/add9079dcf0f0a34e03e5453e754d8fc40116785
DIFF: https://github.com/llvm/llvm-project/commit/add9079dcf0f0a34e03e5453e754d8fc40116785.diff
LOG: [LoongArch] Broadcast repeated subsequence in build_vector instead of inserting per element (#154533)
Added:
Modified:
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/lib/Target/LoongArch/LoongArchISelLowering.h
llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 5b5a33149506a..dad5758f8d054 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2650,6 +2650,7 @@ static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp,
SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
+ MVT VT = Node->getSimpleValueType(0);
EVT ResTy = Op->getValueType(0);
unsigned NumElts = ResTy.getVectorNumElements();
SDLoc DL(Op);
@@ -2744,6 +2745,66 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
}
if (!IsConstant) {
+ // If the BUILD_VECTOR has a repeated pattern, use INSERT_VECTOR_ELT to fill
+ // the sub-sequence of the vector and then broadcast the sub-sequence.
+ //
+ // TODO: If the BUILD_VECTOR contains undef elements, consider falling
+ // back to using INSERT_VECTOR_ELT to materialize the vector, because this
+ // approach generates worse code in some cases. This could be optimized
+ // further with more analysis.
+ SmallVector<SDValue> Sequence;
+ BitVector UndefElements;
+ if (Node->getRepeatedSequence(Sequence, &UndefElements) &&
+ UndefElements.count() == 0) {
+ SDValue Vector = DAG.getUNDEF(ResTy);
+ SDValue FillVec = Vector;
+ EVT FillTy = ResTy;
+
+ // Use LSX instructions to fill the sub-sequence of a 256-bit vector,
+ // because the high part can simply be treated as undef.
+ if (Is256Vec) {
+ FillTy = ResTy.getHalfNumVectorElementsVT(*DAG.getContext());
+ FillVec = DAG.getExtractSubvector(DL, FillTy, Vector, 0);
+ }
+
+ SDValue Op0 = Sequence[0];
+ unsigned SeqLen = Sequence.size();
+ if (!Op0.isUndef())
+ FillVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, FillTy, Op0);
+ for (unsigned i = 1; i < SeqLen; ++i) {
+ SDValue Opi = Sequence[i];
+ if (Opi.isUndef())
+ continue;
+ FillVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, FillTy, FillVec, Opi,
+ DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+ }
+
+ unsigned SplatLen = NumElts / SeqLen;
+ MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
+ MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);
+
+ // If the size of the sub-sequence is half of a 256-bit vector, bitcast the
+ // vector to v4i64 in order to match the pattern of XVREPLVE0Q.
+ if (SplatEltTy == MVT::i128)
+ SplatTy = MVT::v4i64;
+
+ SDValue SplatVec;
+ SDValue SrcVec = DAG.getBitcast(
+ SplatTy,
+ Is256Vec ? DAG.getInsertSubvector(DL, Vector, FillVec, 0) : FillVec);
+ if (Is256Vec) {
+ SplatVec =
+ DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
+ : LoongArchISD::XVREPLVE0,
+ DL, SplatTy, SrcVec);
+ } else {
+ SplatVec = DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,
+ DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
+ }
+
+ return DAG.getBitcast(ResTy, SplatVec);
+ }
+
// Use INSERT_VECTOR_ELT operations rather than expand to stores.
// The resulting code is the same length as the expansion, but it doesn't
// use memory operations.
@@ -7110,6 +7171,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(VREPLGR2VR)
NODE_NAME_CASE(XVPERMI)
NODE_NAME_CASE(XVPERM)
+ NODE_NAME_CASE(XVREPLVE0)
+ NODE_NAME_CASE(XVREPLVE0Q)
NODE_NAME_CASE(VPICK_SEXT_ELT)
NODE_NAME_CASE(VPICK_ZEXT_ELT)
NODE_NAME_CASE(VREPLVE)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 9d14934a9d363..8df3c13f26fea 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -146,6 +146,8 @@ enum NodeType : unsigned {
VREPLGR2VR,
XVPERMI,
XVPERM,
+ XVREPLVE0,
+ XVREPLVE0Q,
// Extended vector element extraction
VPICK_SEXT_ELT,
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index e974597e28be6..adfe990ba1234 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -12,10 +12,14 @@
def SDT_LoongArchXVPERM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
SDTCisVec<2>, SDTCisInt<2>]>;
+def SDT_LoongArchXVREPLVE0 : SDTypeProfile<1, 1, [SDTCisVec<0>,
+ SDTCisSameAs<0, 1>]>;
// Target nodes.
def loongarch_xvpermi: SDNode<"LoongArchISD::XVPERMI", SDT_LoongArchV1RUimm>;
def loongarch_xvperm: SDNode<"LoongArchISD::XVPERM", SDT_LoongArchXVPERM>;
+def loongarch_xvreplve0: SDNode<"LoongArchISD::XVREPLVE0", SDT_LoongArchXVREPLVE0>;
+def loongarch_xvreplve0q: SDNode<"LoongArchISD::XVREPLVE0Q", SDT_LoongArchXVREPLVE0>;
def loongarch_xvmskltz: SDNode<"LoongArchISD::XVMSKLTZ", SDT_LoongArchVMSKCOND>;
def loongarch_xvmskgez: SDNode<"LoongArchISD::XVMSKGEZ", SDT_LoongArchVMSKCOND>;
def loongarch_xvmskeqz: SDNode<"LoongArchISD::XVMSKEQZ", SDT_LoongArchVMSKCOND>;
@@ -1886,11 +1890,26 @@ def : Pat<(loongarch_xvperm v8i32:$xj, v8i32:$xk),
def : Pat<(loongarch_xvperm v8f32:$xj, v8i32:$xk),
(XVPERM_W v8f32:$xj, v8i32:$xk)>;
-// XVREPLVE0_{W/D}
+// XVREPLVE0_{B/H/W/D/Q}
+def : Pat<(loongarch_xvreplve0 v32i8:$xj),
+ (XVREPLVE0_B v32i8:$xj)>;
+def : Pat<(loongarch_xvreplve0 v16i16:$xj),
+ (XVREPLVE0_H v16i16:$xj)>;
+def : Pat<(loongarch_xvreplve0 v8i32:$xj),
+ (XVREPLVE0_W v8i32:$xj)>;
+def : Pat<(loongarch_xvreplve0 v4i64:$xj),
+ (XVREPLVE0_D v4i64:$xj)>;
+def : Pat<(loongarch_xvreplve0 v8f32:$xj),
+ (XVREPLVE0_W v8f32:$xj)>;
+def : Pat<(loongarch_xvreplve0 v4f64:$xj),
+ (XVREPLVE0_D v4f64:$xj)>;
def : Pat<(lasxsplatf32 FPR32:$fj),
(XVREPLVE0_W (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32))>;
def : Pat<(lasxsplatf64 FPR64:$fj),
(XVREPLVE0_D (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64))>;
+foreach vt = [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64] in
+ def : Pat<(vt (loongarch_xvreplve0q LASX256:$xj)),
+ (XVREPLVE0_Q LASX256:$xj)>;
// VSTELM
defm : VstelmPat<truncstorei8, v32i8, XVSTELM_B, simm8, uimm5>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 27966156b6f62..d99a57e562528 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -82,11 +82,11 @@ def loongarch_vmskgez: SDNode<"LoongArchISD::VMSKGEZ", SDT_LoongArchVMSKCOND>;
def loongarch_vmskeqz: SDNode<"LoongArchISD::VMSKEQZ", SDT_LoongArchVMSKCOND>;
def loongarch_vmsknez: SDNode<"LoongArchISD::VMSKNEZ", SDT_LoongArchVMSKCOND>;
-def immZExt1 : ImmLeaf<i64, [{return isUInt<1>(Imm);}]>;
-def immZExt2 : ImmLeaf<i64, [{return isUInt<2>(Imm);}]>;
-def immZExt3 : ImmLeaf<i64, [{return isUInt<3>(Imm);}]>;
-def immZExt4 : ImmLeaf<i64, [{return isUInt<4>(Imm);}]>;
-def immZExt8 : ImmLeaf<i64, [{return isUInt<8>(Imm);}]>;
+def immZExt1 : ImmLeaf<GRLenVT, [{return isUInt<1>(Imm);}]>;
+def immZExt2 : ImmLeaf<GRLenVT, [{return isUInt<2>(Imm);}]>;
+def immZExt3 : ImmLeaf<GRLenVT, [{return isUInt<3>(Imm);}]>;
+def immZExt4 : ImmLeaf<GRLenVT, [{return isUInt<4>(Imm);}]>;
+def immZExt8 : ImmLeaf<GRLenVT, [{return isUInt<8>(Imm);}]>;
class VecCond<SDPatternOperator OpNode, ValueType TyNode,
RegisterClass RC = LSX128>
@@ -2026,15 +2026,15 @@ def : Pat<(loongarch_vilvh v4f32:$vj, v4f32:$vk),
def : Pat<(loongarch_vilvh v2f64:$vj, v2f64:$vk),
(VILVH_D v2f64:$vj, v2f64:$vk)>;
-// VSHUF4I_{B/H/W}
+// VSHUF4I_{B/H/W/D}
def : Pat<(loongarch_vshuf4i v16i8:$vj, immZExt8:$ui8),
(VSHUF4I_B v16i8:$vj, immZExt8:$ui8)>;
def : Pat<(loongarch_vshuf4i v8i16:$vj, immZExt8:$ui8),
- (VSHUF4I_H v8i16:$vj, immZExt8:$ui8)>;
+ (VSHUF4I_H v8i16:$vj, immZExt8:$ui8)>;
def : Pat<(loongarch_vshuf4i v4i32:$vj, immZExt8:$ui8),
- (VSHUF4I_W v4i32:$vj, immZExt8:$ui8)>;
+ (VSHUF4I_W v4i32:$vj, immZExt8:$ui8)>;
def : Pat<(loongarch_vshuf4i v4f32:$vj, immZExt8:$ui8),
- (VSHUF4I_W v4f32:$vj, immZExt8:$ui8)>;
+ (VSHUF4I_W v4f32:$vj, immZExt8:$ui8)>;
def : Pat<(loongarch_vshuf4i_d v2i64:$vj, v2i64:$vk, immZExt8:$ui8),
(VSHUF4I_D v2i64:$vj, v2i64:$vk, immZExt8:$ui8)>;
def : Pat<(loongarch_vshuf4i_d v2f64:$vj, v2f64:$vk, immZExt8:$ui8),
@@ -2044,15 +2044,15 @@ def : Pat<(loongarch_vshuf4i_d v2f64:$vj, v2f64:$vk, immZExt8:$ui8),
def : Pat<(loongarch_vreplvei v16i8:$vj, immZExt4:$ui4),
(VREPLVEI_B v16i8:$vj, immZExt4:$ui4)>;
def : Pat<(loongarch_vreplvei v8i16:$vj, immZExt3:$ui3),
- (VREPLVEI_H v8i16:$vj, immZExt3:$ui3)>;
+ (VREPLVEI_H v8i16:$vj, immZExt3:$ui3)>;
def : Pat<(loongarch_vreplvei v4i32:$vj, immZExt2:$ui2),
- (VREPLVEI_W v4i32:$vj, immZExt2:$ui2)>;
+ (VREPLVEI_W v4i32:$vj, immZExt2:$ui2)>;
def : Pat<(loongarch_vreplvei v2i64:$vj, immZExt1:$ui1),
- (VREPLVEI_D v2i64:$vj, immZExt1:$ui1)>;
+ (VREPLVEI_D v2i64:$vj, immZExt1:$ui1)>;
def : Pat<(loongarch_vreplvei v4f32:$vj, immZExt2:$ui2),
- (VREPLVEI_W v4f32:$vj, immZExt2:$ui2)>;
+ (VREPLVEI_W v4f32:$vj, immZExt2:$ui2)>;
def : Pat<(loongarch_vreplvei v2f64:$vj, immZExt1:$ui1),
- (VREPLVEI_D v2f64:$vj, immZExt1:$ui1)>;
+ (VREPLVEI_D v2f64:$vj, immZExt1:$ui1)>;
// VREPLVEI_{W/D}
def : Pat<(lsxsplatf32 FPR32:$fj),
diff --git a/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll b/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
index 4aa2bd76ab80c..7407e0d5f4195 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
@@ -9,14 +9,9 @@ define <4 x i64> @should_not_be_optimized(ptr %ptr, ptr %dst) {
; LA32-NEXT: ld.w $a2, $a0, 0
; LA32-NEXT: ld.w $a0, $a0, 4
; LA32-NEXT: st.w $a2, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT: xvreplve0.d $xr0, $xr0
; LA32-NEXT: st.w $a0, $a1, 4
; LA32-NEXT: ret
;
@@ -64,14 +59,9 @@ define <4 x i64> @xvldrepl_d_unaligned_offset(ptr %ptr) {
; LA32: # %bb.0:
; LA32-NEXT: ld.w $a1, $a0, 4
; LA32-NEXT: ld.w $a0, $a0, 8
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT: xvreplve0.d $xr0, $xr0
; LA32-NEXT: ret
;
; LA64-LABEL: xvldrepl_d_unaligned_offset:
@@ -162,14 +152,9 @@ define <4 x i64> @xvldrepl_d(ptr %ptr) {
; LA32: # %bb.0:
; LA32-NEXT: ld.w $a1, $a0, 0
; LA32-NEXT: ld.w $a0, $a0, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT: xvreplve0.d $xr0, $xr0
; LA32-NEXT: ret
;
; LA64-LABEL: xvldrepl_d:
@@ -187,14 +172,9 @@ define <4 x i64> @xvldrepl_d_offset(ptr %ptr) {
; LA32: # %bb.0:
; LA32-NEXT: ld.w $a1, $a0, 264
; LA32-NEXT: ld.w $a0, $a0, 268
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT: xvreplve0.d $xr0, $xr0
; LA32-NEXT: ret
;
; LA64-LABEL: xvldrepl_d_offset:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index d6756b9395237..52bc1b2696085 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -44,14 +44,9 @@ entry:
define void @buildvector_v4i64_splat(ptr %dst, i64 %a0) nounwind {
; LA32-LABEL: buildvector_v4i64_splat:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 1
+; LA32-NEXT: xvreplve0.d $xr0, $xr0
; LA32-NEXT: xvst $xr0, $a0, 0
; LA32-NEXT: ret
;
@@ -937,100 +932,23 @@ define void @buildvector_v32i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
; LA32-NEXT: ld.b $t6, $sp, 8
; LA32-NEXT: ld.b $t7, $sp, 4
; LA32-NEXT: ld.b $t8, $sp, 0
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a2
; LA32-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA32-NEXT: xvori.b $xr3, $xr1, 0
-; LA32-NEXT: xvpermi.q $xr3, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr2, $a3
-; LA32-NEXT: xvextrins.b $xr0, $xr3, 17
-; LA32-NEXT: xvori.b $xr4, $xr2, 0
-; LA32-NEXT: xvpermi.q $xr4, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr3, $a4
-; LA32-NEXT: xvextrins.b $xr0, $xr4, 34
-; LA32-NEXT: xvori.b $xr5, $xr3, 0
-; LA32-NEXT: xvpermi.q $xr5, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr4, $a5
-; LA32-NEXT: xvextrins.b $xr0, $xr5, 51
-; LA32-NEXT: xvori.b $xr6, $xr4, 0
-; LA32-NEXT: xvpermi.q $xr6, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr5, $a6
-; LA32-NEXT: xvextrins.b $xr0, $xr6, 68
-; LA32-NEXT: xvori.b $xr7, $xr5, 0
-; LA32-NEXT: xvpermi.q $xr7, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr6, $a7
-; LA32-NEXT: xvextrins.b $xr0, $xr7, 85
-; LA32-NEXT: xvori.b $xr8, $xr6, 0
-; LA32-NEXT: xvpermi.q $xr8, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr7, $t8
-; LA32-NEXT: xvextrins.b $xr0, $xr8, 102
-; LA32-NEXT: xvori.b $xr9, $xr7, 0
-; LA32-NEXT: xvpermi.q $xr9, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr8, $t7
-; LA32-NEXT: xvextrins.b $xr0, $xr9, 119
-; LA32-NEXT: xvori.b $xr10, $xr8, 0
-; LA32-NEXT: xvpermi.q $xr10, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr9, $t6
-; LA32-NEXT: xvextrins.b $xr0, $xr10, 136
-; LA32-NEXT: xvori.b $xr11, $xr9, 0
-; LA32-NEXT: xvpermi.q $xr11, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr10, $t5
-; LA32-NEXT: xvextrins.b $xr0, $xr11, 153
-; LA32-NEXT: xvori.b $xr12, $xr10, 0
-; LA32-NEXT: xvpermi.q $xr12, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr11, $t4
-; LA32-NEXT: xvextrins.b $xr0, $xr12, 170
-; LA32-NEXT: xvori.b $xr13, $xr11, 0
-; LA32-NEXT: xvpermi.q $xr13, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr12, $t3
-; LA32-NEXT: xvextrins.b $xr0, $xr13, 187
-; LA32-NEXT: xvori.b $xr14, $xr12, 0
-; LA32-NEXT: xvpermi.q $xr14, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr13, $t2
-; LA32-NEXT: xvextrins.b $xr0, $xr14, 204
-; LA32-NEXT: xvori.b $xr15, $xr13, 0
-; LA32-NEXT: xvpermi.q $xr15, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr14, $t1
-; LA32-NEXT: xvextrins.b $xr0, $xr15, 221
-; LA32-NEXT: xvori.b $xr16, $xr14, 0
-; LA32-NEXT: xvpermi.q $xr16, $xr0, 18
-; LA32-NEXT: xvreplgr2vr.b $xr15, $t0
-; LA32-NEXT: xvextrins.b $xr0, $xr16, 238
-; LA32-NEXT: xvori.b $xr16, $xr15, 0
-; LA32-NEXT: xvpermi.q $xr16, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr16, 255
-; LA32-NEXT: xvreplgr2vr.b $xr16, $a1
-; LA32-NEXT: xvpermi.q $xr16, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr16, 0
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA32-NEXT: xvpermi.q $xr2, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr2, 34
-; LA32-NEXT: xvpermi.q $xr3, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr3, 51
-; LA32-NEXT: xvpermi.q $xr4, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr4, 68
-; LA32-NEXT: xvpermi.q $xr5, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr5, 85
-; LA32-NEXT: xvpermi.q $xr6, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr6, 102
-; LA32-NEXT: xvpermi.q $xr7, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr7, 119
-; LA32-NEXT: xvpermi.q $xr8, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr8, 136
-; LA32-NEXT: xvpermi.q $xr9, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr9, 153
-; LA32-NEXT: xvpermi.q $xr10, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr10, 170
-; LA32-NEXT: xvpermi.q $xr11, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr11, 187
-; LA32-NEXT: xvpermi.q $xr12, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr12, 204
-; LA32-NEXT: xvpermi.q $xr13, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr13, 221
-; LA32-NEXT: xvpermi.q $xr14, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr14, 238
-; LA32-NEXT: xvpermi.q $xr15, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr15, 255
+; LA32-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; LA32-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; LA32-NEXT: vinsgr2vr.b $vr0, $a4, 3
+; LA32-NEXT: vinsgr2vr.b $vr0, $a5, 4
+; LA32-NEXT: vinsgr2vr.b $vr0, $a6, 5
+; LA32-NEXT: vinsgr2vr.b $vr0, $a7, 6
+; LA32-NEXT: vinsgr2vr.b $vr0, $t8, 7
+; LA32-NEXT: vinsgr2vr.b $vr0, $t7, 8
+; LA32-NEXT: vinsgr2vr.b $vr0, $t6, 9
+; LA32-NEXT: vinsgr2vr.b $vr0, $t5, 10
+; LA32-NEXT: vinsgr2vr.b $vr0, $t4, 11
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 12
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 13
+; LA32-NEXT: vinsgr2vr.b $vr0, $t1, 14
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 15
+; LA32-NEXT: xvreplve0.q $xr0, $xr0
; LA32-NEXT: xvst $xr0, $a0, 0
; LA32-NEXT: ret
;
@@ -1045,100 +963,23 @@ define void @buildvector_v32i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
; LA64-NEXT: ld.b $t6, $sp, 16
; LA64-NEXT: ld.b $t7, $sp, 8
; LA64-NEXT: ld.b $t8, $sp, 0
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a2
; LA64-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA64-NEXT: xvori.b $xr3, $xr1, 0
-; LA64-NEXT: xvpermi.q $xr3, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr2, $a3
-; LA64-NEXT: xvextrins.b $xr0, $xr3, 17
-; LA64-NEXT: xvori.b $xr4, $xr2, 0
-; LA64-NEXT: xvpermi.q $xr4, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr3, $a4
-; LA64-NEXT: xvextrins.b $xr0, $xr4, 34
-; LA64-NEXT: xvori.b $xr5, $xr3, 0
-; LA64-NEXT: xvpermi.q $xr5, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr4, $a5
-; LA64-NEXT: xvextrins.b $xr0, $xr5, 51
-; LA64-NEXT: xvori.b $xr6, $xr4, 0
-; LA64-NEXT: xvpermi.q $xr6, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr5, $a6
-; LA64-NEXT: xvextrins.b $xr0, $xr6, 68
-; LA64-NEXT: xvori.b $xr7, $xr5, 0
-; LA64-NEXT: xvpermi.q $xr7, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr6, $a7
-; LA64-NEXT: xvextrins.b $xr0, $xr7, 85
-; LA64-NEXT: xvori.b $xr8, $xr6, 0
-; LA64-NEXT: xvpermi.q $xr8, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr7, $t8
-; LA64-NEXT: xvextrins.b $xr0, $xr8, 102
-; LA64-NEXT: xvori.b $xr9, $xr7, 0
-; LA64-NEXT: xvpermi.q $xr9, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr8, $t7
-; LA64-NEXT: xvextrins.b $xr0, $xr9, 119
-; LA64-NEXT: xvori.b $xr10, $xr8, 0
-; LA64-NEXT: xvpermi.q $xr10, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr9, $t6
-; LA64-NEXT: xvextrins.b $xr0, $xr10, 136
-; LA64-NEXT: xvori.b $xr11, $xr9, 0
-; LA64-NEXT: xvpermi.q $xr11, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr10, $t5
-; LA64-NEXT: xvextrins.b $xr0, $xr11, 153
-; LA64-NEXT: xvori.b $xr12, $xr10, 0
-; LA64-NEXT: xvpermi.q $xr12, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr11, $t4
-; LA64-NEXT: xvextrins.b $xr0, $xr12, 170
-; LA64-NEXT: xvori.b $xr13, $xr11, 0
-; LA64-NEXT: xvpermi.q $xr13, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr12, $t3
-; LA64-NEXT: xvextrins.b $xr0, $xr13, 187
-; LA64-NEXT: xvori.b $xr14, $xr12, 0
-; LA64-NEXT: xvpermi.q $xr14, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr13, $t2
-; LA64-NEXT: xvextrins.b $xr0, $xr14, 204
-; LA64-NEXT: xvori.b $xr15, $xr13, 0
-; LA64-NEXT: xvpermi.q $xr15, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr14, $t1
-; LA64-NEXT: xvextrins.b $xr0, $xr15, 221
-; LA64-NEXT: xvori.b $xr16, $xr14, 0
-; LA64-NEXT: xvpermi.q $xr16, $xr0, 18
-; LA64-NEXT: xvreplgr2vr.b $xr15, $t0
-; LA64-NEXT: xvextrins.b $xr0, $xr16, 238
-; LA64-NEXT: xvori.b $xr16, $xr15, 0
-; LA64-NEXT: xvpermi.q $xr16, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr16, 255
-; LA64-NEXT: xvreplgr2vr.b $xr16, $a1
-; LA64-NEXT: xvpermi.q $xr16, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr16, 0
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA64-NEXT: xvpermi.q $xr2, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr2, 34
-; LA64-NEXT: xvpermi.q $xr3, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr3, 51
-; LA64-NEXT: xvpermi.q $xr4, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr4, 68
-; LA64-NEXT: xvpermi.q $xr5, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr5, 85
-; LA64-NEXT: xvpermi.q $xr6, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr6, 102
-; LA64-NEXT: xvpermi.q $xr7, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr7, 119
-; LA64-NEXT: xvpermi.q $xr8, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr8, 136
-; LA64-NEXT: xvpermi.q $xr9, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr9, 153
-; LA64-NEXT: xvpermi.q $xr10, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr10, 170
-; LA64-NEXT: xvpermi.q $xr11, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr11, 187
-; LA64-NEXT: xvpermi.q $xr12, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr12, 204
-; LA64-NEXT: xvpermi.q $xr13, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr13, 221
-; LA64-NEXT: xvpermi.q $xr14, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr14, 238
-; LA64-NEXT: xvpermi.q $xr15, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr15, 255
+; LA64-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; LA64-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; LA64-NEXT: vinsgr2vr.b $vr0, $a4, 3
+; LA64-NEXT: vinsgr2vr.b $vr0, $a5, 4
+; LA64-NEXT: vinsgr2vr.b $vr0, $a6, 5
+; LA64-NEXT: vinsgr2vr.b $vr0, $a7, 6
+; LA64-NEXT: vinsgr2vr.b $vr0, $t8, 7
+; LA64-NEXT: vinsgr2vr.b $vr0, $t7, 8
+; LA64-NEXT: vinsgr2vr.b $vr0, $t6, 9
+; LA64-NEXT: vinsgr2vr.b $vr0, $t5, 10
+; LA64-NEXT: vinsgr2vr.b $vr0, $t4, 11
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 12
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 13
+; LA64-NEXT: vinsgr2vr.b $vr0, $t1, 14
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 15
+; LA64-NEXT: xvreplve0.q $xr0, $xr0
; LA64-NEXT: xvst $xr0, $a0, 0
; LA64-NEXT: ret
entry:
@@ -1182,100 +1023,15 @@ define void @buildvector_v32i8_subseq_4(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
; CHECK-LABEL: buildvector_v32i8_subseq_4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld.b $t0, $sp, 0
-; CHECK-NEXT: xvreplgr2vr.b $xr1, $a2
; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr2, $a3
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT: xvori.b $xr4, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr3, $a4
-; CHECK-NEXT: xvextrins.b $xr0, $xr4, 34
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr4, $a5
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT: xvori.b $xr6, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr6, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr5, $a6
-; CHECK-NEXT: xvextrins.b $xr0, $xr6, 68
-; CHECK-NEXT: xvori.b $xr7, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr7, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr6, $a7
-; CHECK-NEXT: xvextrins.b $xr0, $xr7, 85
-; CHECK-NEXT: xvori.b $xr8, $xr6, 0
-; CHECK-NEXT: xvpermi.q $xr8, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr7, $t0
-; CHECK-NEXT: xvextrins.b $xr0, $xr8, 102
-; CHECK-NEXT: xvori.b $xr9, $xr7, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr8, $a1
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 119
-; CHECK-NEXT: xvori.b $xr9, $xr8, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 136
-; CHECK-NEXT: xvori.b $xr9, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 153
-; CHECK-NEXT: xvori.b $xr9, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 170
-; CHECK-NEXT: xvori.b $xr9, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 187
-; CHECK-NEXT: xvori.b $xr9, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 204
-; CHECK-NEXT: xvori.b $xr9, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 221
-; CHECK-NEXT: xvori.b $xr9, $xr6, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 238
-; CHECK-NEXT: xvori.b $xr9, $xr7, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 255
-; CHECK-NEXT: xvori.b $xr9, $xr8, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 0
-; CHECK-NEXT: xvori.b $xr9, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 17
-; CHECK-NEXT: xvori.b $xr9, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 34
-; CHECK-NEXT: xvori.b $xr9, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 51
-; CHECK-NEXT: xvori.b $xr9, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 68
-; CHECK-NEXT: xvori.b $xr9, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 85
-; CHECK-NEXT: xvori.b $xr9, $xr6, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 102
-; CHECK-NEXT: xvori.b $xr9, $xr7, 0
-; CHECK-NEXT: xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr9, 119
-; CHECK-NEXT: xvpermi.q $xr8, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr8, 136
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr1, 153
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr2, 170
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr4, 204
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 221
-; CHECK-NEXT: xvpermi.q $xr6, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr6, 238
-; CHECK-NEXT: xvpermi.q $xr7, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr7, 255
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 3
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a5, 4
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a6, 5
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a7, 6
+; CHECK-NEXT: vinsgr2vr.b $vr0, $t0, 7
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1318,100 +1074,11 @@ entry:
define void @buildvector_v32i8_subseq_8(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3) nounwind {
; CHECK-LABEL: buildvector_v32i8_subseq_8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvreplgr2vr.b $xr1, $a2
; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr2, $a3
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT: xvori.b $xr4, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr3, $a4
-; CHECK-NEXT: xvextrins.b $xr0, $xr4, 34
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr4, $a1
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 68
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 85
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 102
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 119
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 136
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 153
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 170
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 187
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 204
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 221
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 238
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 255
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 0
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 17
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 34
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 68
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 85
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 102
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 119
-; CHECK-NEXT: xvori.b $xr5, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 136
-; CHECK-NEXT: xvori.b $xr5, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 153
-; CHECK-NEXT: xvori.b $xr5, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 170
-; CHECK-NEXT: xvori.b $xr5, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr5, 187
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr4, 204
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr1, 221
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr2, 238
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 255
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 3
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1454,100 +1121,9 @@ entry:
define void @buildvector_v32i8_subseq_16(ptr %dst, i8 %a0, i8 %a1) nounwind {
; CHECK-LABEL: buildvector_v32i8_subseq_16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvreplgr2vr.b $xr1, $a2
; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.b $xr2, $a1
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 34
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 51
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 68
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 85
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 102
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 119
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 136
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 153
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 170
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 204
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 221
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 238
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 255
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 0
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 34
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 51
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 68
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 85
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 102
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 119
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 136
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 153
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 170
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT: xvori.b $xr3, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 204
-; CHECK-NEXT: xvori.b $xr3, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr3, 221
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr2, 238
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr1, 255
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT: xvreplve0.h $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1825,52 +1401,15 @@ define void @buildvector_v16i16_subseq_2(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i1
; CHECK-LABEL: buildvector_v16i16_subseq_2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld.h $t0, $sp, 0
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a2
; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
-; CHECK-NEXT: xvori.b $xr2, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr3, $a3
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr4, $a4
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT: xvori.b $xr2, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr5, $a5
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT: xvori.b $xr2, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr6, $a6
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 68
-; CHECK-NEXT: xvori.b $xr2, $xr6, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr7, $a7
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 85
-; CHECK-NEXT: xvori.b $xr2, $xr7, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr8, $t0
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 102
-; CHECK-NEXT: xvori.b $xr2, $xr8, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 119
-; CHECK-NEXT: xvreplgr2vr.h $xr2, $a1
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 0
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 17
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr3, 34
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr4, 51
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr5, 68
-; CHECK-NEXT: xvpermi.q $xr6, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr6, 85
-; CHECK-NEXT: xvpermi.q $xr7, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr7, 102
-; CHECK-NEXT: xvpermi.q $xr8, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr8, 119
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 3
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a5, 4
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a6, 5
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a7, 6
+; CHECK-NEXT: vinsgr2vr.h $vr0, $t0, 7
+; CHECK-NEXT: xvreplve0.q $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1897,52 +1436,11 @@ entry:
define void @buildvector_v16i16_subseq_4(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwind {
; CHECK-LABEL: buildvector_v16i16_subseq_4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a2
; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
-; CHECK-NEXT: xvori.b $xr2, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr3, $a3
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr4, $a4
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT: xvori.b $xr2, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr5, $a1
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT: xvori.b $xr2, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 68
-; CHECK-NEXT: xvori.b $xr2, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 85
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 102
-; CHECK-NEXT: xvori.b $xr2, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 119
-; CHECK-NEXT: xvori.b $xr2, $xr5, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 0
-; CHECK-NEXT: xvori.b $xr2, $xr1, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT: xvori.b $xr2, $xr4, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT: xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr5, 68
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 85
-; CHECK-NEXT: xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr3, 102
-; CHECK-NEXT: xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr4, 119
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 3
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1969,53 +1467,10 @@ entry:
define void @buildvector_v16i16_subseq_8(ptr %dst, i16 %a0, i16 %a1) nounwind {
; CHECK-LABEL: buildvector_v16i16_subseq_8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvreplgr2vr.h $xr0, $a2
-; CHECK-NEXT: vinsgr2vr.h $vr1, $a1, 0
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvreplgr2vr.h $xr3, $a1
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 17
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 34
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 51
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 68
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 85
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 102
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 119
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 0
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 17
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 34
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 51
-; CHECK-NEXT: xvori.b $xr2, $xr3, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 68
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr2, 85
-; CHECK-NEXT: xvpermi.q $xr3, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr3, 102
-; CHECK-NEXT: xvpermi.q $xr0, $xr1, 48
-; CHECK-NEXT: xvextrins.h $xr1, $xr0, 119
-; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
@@ -2113,14 +1568,11 @@ entry:
define void @buildvector_v8i32_subseq_2(ptr %dst, i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; CHECK-LABEL: buildvector_v8i32_subseq_2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a3, 2
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a4, 3
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 5
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a3, 6
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a4, 7
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a4, 3
+; CHECK-NEXT: xvreplve0.q $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -2139,14 +1591,9 @@ entry:
define void @buildvector_v8i32_subseq_4(ptr %dst, i32 %a0, i32 %a1) nounwind {
; CHECK-LABEL: buildvector_v8i32_subseq_4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 3
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 5
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 6
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 7
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a2, 1
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -2249,23 +1696,19 @@ entry:
define void @buildvector_v4i64_subseq_2(ptr %dst, i64 %a0, i64 %a1) nounwind {
; LA32-LABEL: buildvector_v4i64_subseq_2:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a3, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a4, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a3, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a4, 7
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 1
+; LA32-NEXT: vinsgr2vr.w $vr0, $a3, 2
+; LA32-NEXT: vinsgr2vr.w $vr0, $a4, 3
+; LA32-NEXT: xvreplve0.q $xr0, $xr0
; LA32-NEXT: xvst $xr0, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: buildvector_v4i64_subseq_2:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a1, 0
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a2, 1
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a1, 2
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a2, 3
+; LA64-NEXT: vinsgr2vr.d $vr0, $a1, 0
+; LA64-NEXT: vinsgr2vr.d $vr0, $a2, 1
+; LA64-NEXT: xvreplve0.q $xr0, $xr0
; LA64-NEXT: xvst $xr0, $a0, 0
; LA64-NEXT: ret
entry:
@@ -2367,19 +1810,15 @@ entry:
define void @buildvector_v8f32_subseq_2(ptr %dst, float %a0, float %a1, float %a2, float %a3) nounwind {
; CHECK-LABEL: buildvector_v8f32_subseq_2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f3 killed $f3 def $xr3
-; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2
-; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT: # kill: def $f3 killed $f3 def $vr3
+; CHECK-NEXT: # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvori.b $xr4, $xr0, 0
-; CHECK-NEXT: xvinsve0.w $xr4, $xr1, 1
-; CHECK-NEXT: xvinsve0.w $xr4, $xr2, 2
-; CHECK-NEXT: xvinsve0.w $xr4, $xr3, 3
-; CHECK-NEXT: xvinsve0.w $xr4, $xr0, 4
-; CHECK-NEXT: xvinsve0.w $xr4, $xr1, 5
-; CHECK-NEXT: xvinsve0.w $xr4, $xr2, 6
-; CHECK-NEXT: xvinsve0.w $xr4, $xr3, 7
-; CHECK-NEXT: xvst $xr4, $a0, 0
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT: vextrins.w $vr0, $vr2, 32
+; CHECK-NEXT: vextrins.w $vr0, $vr3, 48
+; CHECK-NEXT: xvreplve0.q $xr0, $xr0
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x float> undef, float %a0, i32 0
@@ -2397,17 +1836,11 @@ entry:
define void @buildvector_v8f32_subseq_4(ptr %dst, float %a0, float %a1) nounwind {
; CHECK-LABEL: buildvector_v8f32_subseq_4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvinsve0.w $xr2, $xr1, 1
-; CHECK-NEXT: xvinsve0.w $xr2, $xr0, 2
-; CHECK-NEXT: xvinsve0.w $xr2, $xr1, 3
-; CHECK-NEXT: xvinsve0.w $xr2, $xr0, 4
-; CHECK-NEXT: xvinsve0.w $xr2, $xr1, 5
-; CHECK-NEXT: xvinsve0.w $xr2, $xr0, 6
-; CHECK-NEXT: xvinsve0.w $xr2, $xr1, 7
-; CHECK-NEXT: xvst $xr2, $a0, 0
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x float> undef, float %a0, i32 0
@@ -2482,13 +1915,11 @@ entry:
define void @buildvector_v4f64_subseq_2(ptr %dst, double %a0, double %a1) nounwind {
; CHECK-LABEL: buildvector_v4f64_subseq_2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $xr1
+; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $vr1
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvori.b $xr2, $xr0, 0
-; CHECK-NEXT: xvinsve0.d $xr2, $xr1, 1
-; CHECK-NEXT: xvinsve0.d $xr2, $xr0, 2
-; CHECK-NEXT: xvinsve0.d $xr2, $xr1, 3
-; CHECK-NEXT: xvst $xr2, $a0, 0
+; CHECK-NEXT: vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT: xvreplve0.q $xr0, $xr0
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x double> undef, double %a0, i32 0
diff --git a/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll b/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
index 349684ff22be2..41c38b01ce98e 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
@@ -11,8 +11,7 @@ define <2 x i64> @should_not_be_optimized(ptr %ptr, ptr %dst){
; LA32-NEXT: st.w $a2, $a1, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 2
-; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT: vreplvei.d $vr0, $vr0, 0
; LA32-NEXT: st.w $a0, $a1, 4
; LA32-NEXT: ret
;
@@ -62,8 +61,7 @@ define <2 x i64> @vldrepl_d_unaligned_offset(ptr %ptr) {
; LA32-NEXT: ld.w $a0, $a0, 8
; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT: vreplvei.d $vr0, $vr0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: vldrepl_d_unaligned_offset:
@@ -155,8 +153,7 @@ define <2 x i64> @vldrepl_d(ptr %ptr) {
; LA32-NEXT: ld.w $a0, $a0, 4
; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT: vreplvei.d $vr0, $vr0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: vldrepl_d:
@@ -176,8 +173,7 @@ define <2 x i64> @vldrepl_d_offset(ptr %ptr) {
; LA32-NEXT: ld.w $a0, $a0, 268
; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT: vreplvei.d $vr0, $vr0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: vldrepl_d_offset:
diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
index ac28151dc2f54..cae7c08f2d685 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
@@ -46,8 +46,7 @@ define void @buildvector_v2i64_splat(ptr %dst, i64 %a0) nounwind {
; LA32: # %bb.0: # %entry
; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 0
; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 1
-; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 3
+; LA32-NEXT: vreplvei.d $vr0, $vr0, 0
; LA32-NEXT: vst $vr0, $a0, 0
; LA32-NEXT: ret
;
@@ -455,14 +454,7 @@ define void @buildvector_v16i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
; CHECK-NEXT: vinsgr2vr.b $vr0, $a6, 5
; CHECK-NEXT: vinsgr2vr.b $vr0, $a7, 6
; CHECK-NEXT: vinsgr2vr.b $vr0, $t0, 7
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 10
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 11
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a5, 12
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a6, 13
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a7, 14
-; CHECK-NEXT: vinsgr2vr.b $vr0, $t0, 15
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -493,18 +485,7 @@ define void @buildvector_v16i8_subseq_4(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 2
; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 3
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 4
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 5
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 6
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 7
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 10
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 11
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 12
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 13
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 14
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 15
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -533,20 +514,7 @@ define void @buildvector_v16i8_subseq_8(ptr %dst, i8 %a0, i8 %a1) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 2
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 3
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 4
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 5
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 6
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 7
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 10
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 11
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 12
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 13
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 14
-; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 15
+; CHECK-NEXT: vreplvei.h $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -649,10 +617,7 @@ define void @buildvector_v8i16_subseq_2(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i16
; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1
; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 2
; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 3
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 4
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 5
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 6
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 7
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -673,12 +638,7 @@ define void @buildvector_v8i16_subseq_4(ptr %dst, i16 %a0, i16 %a1) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 2
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 3
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 4
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 5
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 6
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 7
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -751,8 +711,7 @@ define void @buildvector_v4i32_subseq_2(ptr %dst, i32 %a0, i32 %a1) nounwind {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 0
; CHECK-NEXT: vinsgr2vr.w $vr0, $a2, 1
-; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 2
-; CHECK-NEXT: vinsgr2vr.w $vr0, $a2, 3
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -893,11 +852,9 @@ define void @buildvector_v4f32_subseq_2(ptr %dst, float %a0, float %a1) nounwind
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT: vori.b $vr2, $vr0, 0
-; CHECK-NEXT: vextrins.w $vr2, $vr1, 16
-; CHECK-NEXT: vextrins.w $vr2, $vr0, 32
-; CHECK-NEXT: vextrins.w $vr2, $vr1, 48
-; CHECK-NEXT: vst $vr2, $a0, 0
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x float> undef, float %a0, i32 0