[llvm] [LoongArch] Broadcast repeated subsequence in build_vector instead of inserting per element (PR #154533)

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 1 19:21:30 PDT 2025


https://github.com/zhaoqi5 updated https://github.com/llvm/llvm-project/pull/154533

>From 0de63e45f91ad0e32de4ee15ba8fac84b2bdb64d Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Wed, 20 Aug 2025 20:39:50 +0800
Subject: [PATCH 1/5] [LoongArch] Broadcast repeated subsequence in
 build_vector instead of inserting per element

---
 .../LoongArch/LoongArchISelLowering.cpp       | 53 +++++++++++++++++++
 .../Target/LoongArch/LoongArchISelLowering.h  |  2 +
 .../LoongArch/LoongArchLASXInstrInfo.td       | 22 +++++++-
 3 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 8313869c1016d..3f7f2f94a16f7 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2539,6 +2539,7 @@ static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp,
 SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                    SelectionDAG &DAG) const {
   BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
+  MVT VT = Node->getSimpleValueType(0);
   EVT ResTy = Op->getValueType(0);
   unsigned NumElts = ResTy.getVectorNumElements();
   SDLoc DL(Op);
@@ -2632,6 +2633,56 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
   }
 
   if (!IsConstant) {
+    // If the BUILD_VECTOR has a repeated pattern, use INSERT_VECTOR_ELT to fill
+    // the sub-sequence of the vector and then broadcast the sub-sequence.
+    SmallVector<SDValue> Sequence;
+    BitVector UndefElements;
+    if (Node->getRepeatedSequence(Sequence, &UndefElements)) {
+      // TODO: If the BUILD_VECTOR contains undef elements, consider falling
+      // back to use INSERT_VECTOR_ELT to materialize the vector, because it
+      // generates worse code in some cases. This could be further optimized
+      // with more consideration.
+      if (UndefElements.count() == 0) {
+        unsigned SeqLen = Sequence.size();
+
+        SDValue Op0 = Sequence[0];
+        SDValue Vector = DAG.getUNDEF(ResTy);
+        if (!Op0.isUndef())
+          Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Op0);
+        for (unsigned i = 1; i < SeqLen; ++i) {
+          SDValue Opi = Sequence[i];
+          if (Opi.isUndef())
+            continue;
+          Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Opi,
+                               DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+        }
+
+        unsigned SplatLen = NumElts / SeqLen;
+        MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
+        MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);
+
+        // If size of the sub-sequence is half of a 256-bits vector, bitcast the
+        // vector to v4i64 type in order to match the pattern of XVREPLVE0Q.
+        if (SplatEltTy == MVT::i128)
+          SplatTy = MVT::v4i64;
+
+        SDValue SrcVec = DAG.getBitcast(SplatTy, Vector);
+        SDValue SplatVec;
+        if (SplatTy.is256BitVector()) {
+          SplatVec =
+              DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
+                                                    : LoongArchISD::XVREPLVE0,
+                          DL, SplatTy, SrcVec);
+        } else {
+          SplatVec =
+              DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,
+                          DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
+        }
+
+        return DAG.getBitcast(ResTy, SplatVec);
+      }
+    }
+
     // Use INSERT_VECTOR_ELT operations rather than expand to stores.
     // The resulting code is the same length as the expansion, but it doesn't
     // use memory operations.
@@ -6913,6 +6964,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
     NODE_NAME_CASE(VREPLVEI)
     NODE_NAME_CASE(VREPLGR2VR)
     NODE_NAME_CASE(XVPERMI)
+    NODE_NAME_CASE(XVREPLVE0)
+    NODE_NAME_CASE(XVREPLVE0Q)
     NODE_NAME_CASE(VPICK_SEXT_ELT)
     NODE_NAME_CASE(VPICK_ZEXT_ELT)
     NODE_NAME_CASE(VREPLVE)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index f2058939bf5df..f3eaba6f24955 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -145,6 +145,8 @@ enum NodeType : unsigned {
   VREPLVEI,
   VREPLGR2VR,
   XVPERMI,
+  XVREPLVE0,
+  XVREPLVE0Q,
 
   // Extended vector element extraction
   VPICK_SEXT_ELT,
diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index cf63750461edd..d2fe3b0f1b0d8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -10,8 +10,13 @@
 //
 //===----------------------------------------------------------------------===//
 
+def SDT_LoongArchXVREPLVE0 : SDTypeProfile<1, 1, [SDTCisVec<0>,
+                                                  SDTCisSameAs<0, 1>]>;
+
 // Target nodes.
 def loongarch_xvpermi: SDNode<"LoongArchISD::XVPERMI", SDT_LoongArchV1RUimm>;
+def loongarch_xvreplve0: SDNode<"LoongArchISD::XVREPLVE0", SDT_LoongArchXVREPLVE0>;
+def loongarch_xvreplve0q: SDNode<"LoongArchISD::XVREPLVE0Q", SDT_LoongArchXVREPLVE0>;
 def loongarch_xvmskltz: SDNode<"LoongArchISD::XVMSKLTZ", SDT_LoongArchVMSKCOND>;
 def loongarch_xvmskgez: SDNode<"LoongArchISD::XVMSKGEZ", SDT_LoongArchVMSKCOND>;
 def loongarch_xvmskeqz: SDNode<"LoongArchISD::XVMSKEQZ", SDT_LoongArchVMSKCOND>;
@@ -1866,11 +1871,26 @@ def : Pat<(loongarch_xvpermi v4i64:$xj, immZExt8: $ui8),
 def : Pat<(loongarch_xvpermi v4f64:$xj, immZExt8: $ui8),
           (XVPERMI_D v4f64:$xj, immZExt8: $ui8)>;
 
-// XVREPLVE0_{W/D}
+// XVREPLVE0_{B/H/W/D/Q}
+def : Pat<(loongarch_xvreplve0 v32i8:$xj),
+          (XVREPLVE0_B v32i8:$xj)>;
+def : Pat<(loongarch_xvreplve0 v16i16:$xj),
+          (XVREPLVE0_H v16i16:$xj)>;
+def : Pat<(loongarch_xvreplve0 v8i32:$xj),
+          (XVREPLVE0_W v8i32:$xj)>;
+def : Pat<(loongarch_xvreplve0 v4i64:$xj),
+          (XVREPLVE0_D v4i64:$xj)>;
+def : Pat<(loongarch_xvreplve0 v8f32:$xj),
+          (XVREPLVE0_W v8f32:$xj)>;
+def : Pat<(loongarch_xvreplve0 v4f64:$xj),
+          (XVREPLVE0_D v4f64:$xj)>;
 def : Pat<(lasxsplatf32 FPR32:$fj),
           (XVREPLVE0_W (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32))>;
 def : Pat<(lasxsplatf64 FPR64:$fj),
           (XVREPLVE0_D (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64))>;
+foreach vt = [v32i8, v16i16, v8i32, v4i64, v8f32, v4f64] in
+  def : Pat<(vt (loongarch_xvreplve0q LASX256:$xj)),
+            (XVREPLVE0_Q LASX256:$xj)>;
 
 // VSTELM
 defm : VstelmPat<truncstorei8, v32i8, XVSTELM_B, simm8, uimm5>;

>From e82feabbdb2416bb1e074f684c10d97e698d6eeb Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Thu, 21 Aug 2025 10:48:46 +0800
Subject: [PATCH 2/5] using lsx inserting

---
 .../LoongArch/LoongArchISelLowering.cpp       | 32 ++++++++++++++++---
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 3f7f2f94a16f7..1fa37a2f832a8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2645,18 +2645,42 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
       if (UndefElements.count() == 0) {
         unsigned SeqLen = Sequence.size();
 
-        SDValue Op0 = Sequence[0];
         SDValue Vector = DAG.getUNDEF(ResTy);
+        SDValue FillVec = Vector;
+        EVT FillTy = ResTy;
+
+        // Using LSX instructions to fill the sub-sequence of 256-bits vector,
+        // because the high part can be simply treated as undef.
+        if (ResTy.is256BitVector()) {
+          MVT HalfEltTy;
+          if (ResTy.isFloatingPoint())
+            HalfEltTy = MVT::getFloatingPointVT(VT.getScalarSizeInBits());
+          else
+            HalfEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits());
+          EVT HalfTy = MVT::getVectorVT(HalfEltTy, NumElts / 2);
+          SDValue HalfVec = DAG.getExtractSubvector(DL, HalfTy, Vector, 0);
+
+          FillVec = HalfVec;
+          FillTy = HalfTy;
+        }
+
+        SDValue Op0 = Sequence[0];
         if (!Op0.isUndef())
-          Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Op0);
+          FillVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, FillTy, Op0);
         for (unsigned i = 1; i < SeqLen; ++i) {
           SDValue Opi = Sequence[i];
           if (Opi.isUndef())
             continue;
-          Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Opi,
-                               DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+          FillVec =
+              DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, FillTy, FillVec, Opi,
+                          DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
         }
 
+        if (ResTy.is256BitVector())
+          Vector = DAG.getInsertSubvector(DL, Vector, FillVec, 0);
+        else
+          Vector = FillVec;
+
         unsigned SplatLen = NumElts / SeqLen;
         MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
         MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);

>From f3020313668d8813c7c40ccd5edb184e6e291d4b Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Fri, 22 Aug 2025 16:12:27 +0800
Subject: [PATCH 3/5] update

---
 .../LoongArch/LoongArchISelLowering.cpp       | 116 ++++++++----------
 1 file changed, 51 insertions(+), 65 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 1fa37a2f832a8..3bb1479f20eb4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2635,76 +2635,62 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
   if (!IsConstant) {
     // If the BUILD_VECTOR has a repeated pattern, use INSERT_VECTOR_ELT to fill
     // the sub-sequence of the vector and then broadcast the sub-sequence.
+    //
+    // TODO: If the BUILD_VECTOR contains undef elements, consider falling
+    // back to using INSERT_VECTOR_ELT to materialize the vector, because it
+    // generates worse code in some cases. This could be optimized further
+    // with more consideration.
     SmallVector<SDValue> Sequence;
     BitVector UndefElements;
-    if (Node->getRepeatedSequence(Sequence, &UndefElements)) {
-      // TODO: If the BUILD_VECTOR contains undef elements, consider falling
-      // back to use INSERT_VECTOR_ELT to materialize the vector, because it
-      // generates worse code in some cases. This could be further optimized
-      // with more consideration.
-      if (UndefElements.count() == 0) {
-        unsigned SeqLen = Sequence.size();
-
-        SDValue Vector = DAG.getUNDEF(ResTy);
-        SDValue FillVec = Vector;
-        EVT FillTy = ResTy;
-
-        // Using LSX instructions to fill the sub-sequence of 256-bits vector,
-        // because the high part can be simply treated as undef.
-        if (ResTy.is256BitVector()) {
-          MVT HalfEltTy;
-          if (ResTy.isFloatingPoint())
-            HalfEltTy = MVT::getFloatingPointVT(VT.getScalarSizeInBits());
-          else
-            HalfEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits());
-          EVT HalfTy = MVT::getVectorVT(HalfEltTy, NumElts / 2);
-          SDValue HalfVec = DAG.getExtractSubvector(DL, HalfTy, Vector, 0);
-
-          FillVec = HalfVec;
-          FillTy = HalfTy;
-        }
-
-        SDValue Op0 = Sequence[0];
-        if (!Op0.isUndef())
-          FillVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, FillTy, Op0);
-        for (unsigned i = 1; i < SeqLen; ++i) {
-          SDValue Opi = Sequence[i];
-          if (Opi.isUndef())
-            continue;
-          FillVec =
-              DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, FillTy, FillVec, Opi,
-                          DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
-        }
+    if (Node->getRepeatedSequence(Sequence, &UndefElements) &&
+        UndefElements.count() == 0) {
+      SDValue Vector = DAG.getUNDEF(ResTy);
+      SDValue FillVec = Vector;
+      EVT FillTy = ResTy;
+
+      // Use LSX instructions to fill the sub-sequence of a 256-bit vector,
+      // because the high part can simply be treated as undef.
+      if (Is256Vec) {
+        FillTy = ResTy.getHalfNumVectorElementsVT(*DAG.getContext());
+        FillVec = DAG.getExtractSubvector(DL, FillTy, Vector, 0);
+      }
 
-        if (ResTy.is256BitVector())
-          Vector = DAG.getInsertSubvector(DL, Vector, FillVec, 0);
-        else
-          Vector = FillVec;
-
-        unsigned SplatLen = NumElts / SeqLen;
-        MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
-        MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);
-
-        // If size of the sub-sequence is half of a 256-bits vector, bitcast the
-        // vector to v4i64 type in order to match the pattern of XVREPLVE0Q.
-        if (SplatEltTy == MVT::i128)
-          SplatTy = MVT::v4i64;
-
-        SDValue SrcVec = DAG.getBitcast(SplatTy, Vector);
-        SDValue SplatVec;
-        if (SplatTy.is256BitVector()) {
-          SplatVec =
-              DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
-                                                    : LoongArchISD::XVREPLVE0,
-                          DL, SplatTy, SrcVec);
-        } else {
-          SplatVec =
-              DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,
-                          DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
-        }
+      SDValue Op0 = Sequence[0];
+      unsigned SeqLen = Sequence.size();
+      if (!Op0.isUndef())
+        FillVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, FillTy, Op0);
+      for (unsigned i = 1; i < SeqLen; ++i) {
+        SDValue Opi = Sequence[i];
+        if (Opi.isUndef())
+          continue;
+        FillVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, FillTy, FillVec, Opi,
+                              DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+      }
 
-        return DAG.getBitcast(ResTy, SplatVec);
+      unsigned SplatLen = NumElts / SeqLen;
+      MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
+      MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);
+
+      // If the size of the sub-sequence is half of a 256-bit vector, bitcast
+      // the vector to v4i64 type in order to match the pattern of XVREPLVE0Q.
+      if (SplatEltTy == MVT::i128)
+        SplatTy = MVT::v4i64;
+
+      SDValue SplatVec;
+      SDValue SrcVec = DAG.getBitcast(
+          SplatTy,
+          Is256Vec ? DAG.getInsertSubvector(DL, Vector, FillVec, 0) : FillVec);
+      if (Is256Vec) {
+        SplatVec =
+            DAG.getNode((SplatEltTy == MVT::i128) ? LoongArchISD::XVREPLVE0Q
+                                                  : LoongArchISD::XVREPLVE0,
+                        DL, SplatTy, SrcVec);
+      } else {
+        SplatVec = DAG.getNode(LoongArchISD::VREPLVEI, DL, SplatTy, SrcVec,
+                               DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
       }
+
+      return DAG.getBitcast(ResTy, SplatVec);
     }
 
     // Use INSERT_VECTOR_ELT operations rather than expand to stores.

>From 2b37a267cb47363de475c89cc9e74b0979279127 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 2 Sep 2025 10:07:24 +0800
Subject: [PATCH 4/5] la32 passed

---
 .../Target/LoongArch/LoongArchLSXInstrInfo.td | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index eb7120ffb41a6..932a27f001409 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -82,11 +82,11 @@ def loongarch_vmskgez: SDNode<"LoongArchISD::VMSKGEZ", SDT_LoongArchVMSKCOND>;
 def loongarch_vmskeqz: SDNode<"LoongArchISD::VMSKEQZ", SDT_LoongArchVMSKCOND>;
 def loongarch_vmsknez: SDNode<"LoongArchISD::VMSKNEZ", SDT_LoongArchVMSKCOND>;
 
-def immZExt1 : ImmLeaf<i64, [{return isUInt<1>(Imm);}]>;
-def immZExt2 : ImmLeaf<i64, [{return isUInt<2>(Imm);}]>;
-def immZExt3 : ImmLeaf<i64, [{return isUInt<3>(Imm);}]>;
-def immZExt4 : ImmLeaf<i64, [{return isUInt<4>(Imm);}]>;
-def immZExt8 : ImmLeaf<i64, [{return isUInt<8>(Imm);}]>;
+def immZExt1 : ImmLeaf<GRLenVT, [{return isUInt<1>(Imm);}]>;
+def immZExt2 : ImmLeaf<GRLenVT, [{return isUInt<2>(Imm);}]>;
+def immZExt3 : ImmLeaf<GRLenVT, [{return isUInt<3>(Imm);}]>;
+def immZExt4 : ImmLeaf<GRLenVT, [{return isUInt<4>(Imm);}]>;
+def immZExt8 : ImmLeaf<GRLenVT, [{return isUInt<8>(Imm);}]>;
 
 class VecCond<SDPatternOperator OpNode, ValueType TyNode,
               RegisterClass RC = LSX128>
@@ -2016,15 +2016,15 @@ def : Pat<(loongarch_vilvh v4f32:$vj, v4f32:$vk),
 def : Pat<(loongarch_vilvh v2f64:$vj, v2f64:$vk),
           (VILVH_D v2f64:$vj, v2f64:$vk)>;
 
-// VSHUF4I_{B/H/W}
+// VSHUF4I_{B/H/W/D}
 def : Pat<(loongarch_vshuf4i v16i8:$vj, immZExt8:$ui8),
           (VSHUF4I_B v16i8:$vj, immZExt8:$ui8)>;
 def : Pat<(loongarch_vshuf4i v8i16:$vj, immZExt8:$ui8),
-        (VSHUF4I_H v8i16:$vj, immZExt8:$ui8)>;
+          (VSHUF4I_H v8i16:$vj, immZExt8:$ui8)>;
 def : Pat<(loongarch_vshuf4i v4i32:$vj, immZExt8:$ui8),
-        (VSHUF4I_W v4i32:$vj, immZExt8:$ui8)>;
+          (VSHUF4I_W v4i32:$vj, immZExt8:$ui8)>;
 def : Pat<(loongarch_vshuf4i v4f32:$vj, immZExt8:$ui8),
-        (VSHUF4I_W v4f32:$vj, immZExt8:$ui8)>;
+          (VSHUF4I_W v4f32:$vj, immZExt8:$ui8)>;
 def : Pat<(loongarch_vshuf4i_d v2i64:$vj, v2i64:$vk, immZExt8:$ui8),
           (VSHUF4I_D v2i64:$vj, v2i64:$vk, immZExt8:$ui8)>;
 def : Pat<(loongarch_vshuf4i_d v2f64:$vj, v2f64:$vk, immZExt8:$ui8),
@@ -2034,15 +2034,15 @@ def : Pat<(loongarch_vshuf4i_d v2f64:$vj, v2f64:$vk, immZExt8:$ui8),
 def : Pat<(loongarch_vreplvei v16i8:$vj, immZExt4:$ui4),
           (VREPLVEI_B v16i8:$vj, immZExt4:$ui4)>;
 def : Pat<(loongarch_vreplvei v8i16:$vj, immZExt3:$ui3),
-        (VREPLVEI_H v8i16:$vj, immZExt3:$ui3)>;
+          (VREPLVEI_H v8i16:$vj, immZExt3:$ui3)>;
 def : Pat<(loongarch_vreplvei v4i32:$vj, immZExt2:$ui2),
-        (VREPLVEI_W v4i32:$vj, immZExt2:$ui2)>;
+          (VREPLVEI_W v4i32:$vj, immZExt2:$ui2)>;
 def : Pat<(loongarch_vreplvei v2i64:$vj, immZExt1:$ui1),
-        (VREPLVEI_D v2i64:$vj, immZExt1:$ui1)>;
+          (VREPLVEI_D v2i64:$vj, immZExt1:$ui1)>;
 def : Pat<(loongarch_vreplvei v4f32:$vj, immZExt2:$ui2),
-        (VREPLVEI_W v4f32:$vj, immZExt2:$ui2)>;
+          (VREPLVEI_W v4f32:$vj, immZExt2:$ui2)>;
 def : Pat<(loongarch_vreplvei v2f64:$vj, immZExt1:$ui1),
-        (VREPLVEI_D v2f64:$vj, immZExt1:$ui1)>;
+          (VREPLVEI_D v2f64:$vj, immZExt1:$ui1)>;
 
 // VREPLVEI_{W/D}
 def : Pat<(lsxsplatf32 FPR32:$fj),

>From 0e3aa0dc0e25fb8f62dd15396995b1db7516f643 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 2 Sep 2025 10:07:46 +0800
Subject: [PATCH 5/5] update tests

---
 .../CodeGen/LoongArch/lasx/broadcast-load.ll  |  44 +-
 .../CodeGen/LoongArch/lasx/build-vector.ll    | 763 +++---------------
 .../CodeGen/LoongArch/lsx/broadcast-load.ll   |  12 +-
 .../CodeGen/LoongArch/lsx/build-vector.ll     |  63 +-
 4 files changed, 123 insertions(+), 759 deletions(-)

diff --git a/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll b/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
index 4aa2bd76ab80c..7407e0d5f4195 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/broadcast-load.ll
@@ -9,14 +9,9 @@ define <4 x i64> @should_not_be_optimized(ptr %ptr, ptr %dst) {
 ; LA32-NEXT:    ld.w $a2, $a0, 0
 ; LA32-NEXT:    ld.w $a0, $a0, 4
 ; LA32-NEXT:    st.w $a2, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT:    xvreplve0.d $xr0, $xr0
 ; LA32-NEXT:    st.w $a0, $a1, 4
 ; LA32-NEXT:    ret
 ;
@@ -64,14 +59,9 @@ define <4 x i64> @xvldrepl_d_unaligned_offset(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.w $a1, $a0, 4
 ; LA32-NEXT:    ld.w $a0, $a0, 8
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT:    xvreplve0.d $xr0, $xr0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xvldrepl_d_unaligned_offset:
@@ -162,14 +152,9 @@ define <4 x i64> @xvldrepl_d(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.w $a1, $a0, 0
 ; LA32-NEXT:    ld.w $a0, $a0, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT:    xvreplve0.d $xr0, $xr0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xvldrepl_d:
@@ -187,14 +172,9 @@ define <4 x i64> @xvldrepl_d_offset(ptr %ptr) {
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    ld.w $a1, $a0, 264
 ; LA32-NEXT:    ld.w $a0, $a0, 268
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a0, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
+; LA32-NEXT:    xvreplve0.d $xr0, $xr0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: xvldrepl_d_offset:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index 23245726c8968..b7fdaff37f759 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -44,14 +44,9 @@ entry:
 define void @buildvector_v4i64_splat(ptr %dst, i64 %a0) nounwind {
 ; LA32-LABEL: buildvector_v4i64_splat:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 1
+; LA32-NEXT:    xvreplve0.d $xr0, $xr0
 ; LA32-NEXT:    xvst $xr0, $a0, 0
 ; LA32-NEXT:    ret
 ;
@@ -879,100 +874,23 @@ define void @buildvector_v32i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; LA32-NEXT:    ld.b $t6, $sp, 8
 ; LA32-NEXT:    ld.b $t7, $sp, 4
 ; LA32-NEXT:    ld.b $t8, $sp, 0
-; LA32-NEXT:    xvreplgr2vr.b $xr1, $a2
 ; LA32-NEXT:    vinsgr2vr.b $vr0, $a1, 0
-; LA32-NEXT:    xvori.b $xr3, $xr1, 0
-; LA32-NEXT:    xvpermi.q $xr3, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr2, $a3
-; LA32-NEXT:    xvextrins.b $xr0, $xr3, 17
-; LA32-NEXT:    xvori.b $xr4, $xr2, 0
-; LA32-NEXT:    xvpermi.q $xr4, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr3, $a4
-; LA32-NEXT:    xvextrins.b $xr0, $xr4, 34
-; LA32-NEXT:    xvori.b $xr5, $xr3, 0
-; LA32-NEXT:    xvpermi.q $xr5, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr4, $a5
-; LA32-NEXT:    xvextrins.b $xr0, $xr5, 51
-; LA32-NEXT:    xvori.b $xr6, $xr4, 0
-; LA32-NEXT:    xvpermi.q $xr6, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr5, $a6
-; LA32-NEXT:    xvextrins.b $xr0, $xr6, 68
-; LA32-NEXT:    xvori.b $xr7, $xr5, 0
-; LA32-NEXT:    xvpermi.q $xr7, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr6, $a7
-; LA32-NEXT:    xvextrins.b $xr0, $xr7, 85
-; LA32-NEXT:    xvori.b $xr8, $xr6, 0
-; LA32-NEXT:    xvpermi.q $xr8, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr7, $t8
-; LA32-NEXT:    xvextrins.b $xr0, $xr8, 102
-; LA32-NEXT:    xvori.b $xr9, $xr7, 0
-; LA32-NEXT:    xvpermi.q $xr9, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr8, $t7
-; LA32-NEXT:    xvextrins.b $xr0, $xr9, 119
-; LA32-NEXT:    xvori.b $xr10, $xr8, 0
-; LA32-NEXT:    xvpermi.q $xr10, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr9, $t6
-; LA32-NEXT:    xvextrins.b $xr0, $xr10, 136
-; LA32-NEXT:    xvori.b $xr11, $xr9, 0
-; LA32-NEXT:    xvpermi.q $xr11, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr10, $t5
-; LA32-NEXT:    xvextrins.b $xr0, $xr11, 153
-; LA32-NEXT:    xvori.b $xr12, $xr10, 0
-; LA32-NEXT:    xvpermi.q $xr12, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr11, $t4
-; LA32-NEXT:    xvextrins.b $xr0, $xr12, 170
-; LA32-NEXT:    xvori.b $xr13, $xr11, 0
-; LA32-NEXT:    xvpermi.q $xr13, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr12, $t3
-; LA32-NEXT:    xvextrins.b $xr0, $xr13, 187
-; LA32-NEXT:    xvori.b $xr14, $xr12, 0
-; LA32-NEXT:    xvpermi.q $xr14, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr13, $t2
-; LA32-NEXT:    xvextrins.b $xr0, $xr14, 204
-; LA32-NEXT:    xvori.b $xr15, $xr13, 0
-; LA32-NEXT:    xvpermi.q $xr15, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr14, $t1
-; LA32-NEXT:    xvextrins.b $xr0, $xr15, 221
-; LA32-NEXT:    xvori.b $xr16, $xr14, 0
-; LA32-NEXT:    xvpermi.q $xr16, $xr0, 18
-; LA32-NEXT:    xvreplgr2vr.b $xr15, $t0
-; LA32-NEXT:    xvextrins.b $xr0, $xr16, 238
-; LA32-NEXT:    xvori.b $xr16, $xr15, 0
-; LA32-NEXT:    xvpermi.q $xr16, $xr0, 18
-; LA32-NEXT:    xvextrins.b $xr0, $xr16, 255
-; LA32-NEXT:    xvreplgr2vr.b $xr16, $a1
-; LA32-NEXT:    xvpermi.q $xr16, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr16, 0
-; LA32-NEXT:    xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr1, 17
-; LA32-NEXT:    xvpermi.q $xr2, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr2, 34
-; LA32-NEXT:    xvpermi.q $xr3, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr3, 51
-; LA32-NEXT:    xvpermi.q $xr4, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr4, 68
-; LA32-NEXT:    xvpermi.q $xr5, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr5, 85
-; LA32-NEXT:    xvpermi.q $xr6, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr6, 102
-; LA32-NEXT:    xvpermi.q $xr7, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr7, 119
-; LA32-NEXT:    xvpermi.q $xr8, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr8, 136
-; LA32-NEXT:    xvpermi.q $xr9, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr9, 153
-; LA32-NEXT:    xvpermi.q $xr10, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr10, 170
-; LA32-NEXT:    xvpermi.q $xr11, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr11, 187
-; LA32-NEXT:    xvpermi.q $xr12, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr12, 204
-; LA32-NEXT:    xvpermi.q $xr13, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr13, 221
-; LA32-NEXT:    xvpermi.q $xr14, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr14, 238
-; LA32-NEXT:    xvpermi.q $xr15, $xr0, 48
-; LA32-NEXT:    xvextrins.b $xr0, $xr15, 255
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a3, 2
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a4, 3
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a5, 4
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a6, 5
+; LA32-NEXT:    vinsgr2vr.b $vr0, $a7, 6
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t8, 7
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t7, 8
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t6, 9
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t5, 10
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t4, 11
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t3, 12
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t2, 13
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t1, 14
+; LA32-NEXT:    vinsgr2vr.b $vr0, $t0, 15
+; LA32-NEXT:    xvreplve0.q $xr0, $xr0
 ; LA32-NEXT:    xvst $xr0, $a0, 0
 ; LA32-NEXT:    ret
 ;
@@ -987,100 +905,23 @@ define void @buildvector_v32i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; LA64-NEXT:    ld.b $t6, $sp, 16
 ; LA64-NEXT:    ld.b $t7, $sp, 8
 ; LA64-NEXT:    ld.b $t8, $sp, 0
-; LA64-NEXT:    xvreplgr2vr.b $xr1, $a2
 ; LA64-NEXT:    vinsgr2vr.b $vr0, $a1, 0
-; LA64-NEXT:    xvori.b $xr3, $xr1, 0
-; LA64-NEXT:    xvpermi.q $xr3, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr2, $a3
-; LA64-NEXT:    xvextrins.b $xr0, $xr3, 17
-; LA64-NEXT:    xvori.b $xr4, $xr2, 0
-; LA64-NEXT:    xvpermi.q $xr4, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr3, $a4
-; LA64-NEXT:    xvextrins.b $xr0, $xr4, 34
-; LA64-NEXT:    xvori.b $xr5, $xr3, 0
-; LA64-NEXT:    xvpermi.q $xr5, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr4, $a5
-; LA64-NEXT:    xvextrins.b $xr0, $xr5, 51
-; LA64-NEXT:    xvori.b $xr6, $xr4, 0
-; LA64-NEXT:    xvpermi.q $xr6, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr5, $a6
-; LA64-NEXT:    xvextrins.b $xr0, $xr6, 68
-; LA64-NEXT:    xvori.b $xr7, $xr5, 0
-; LA64-NEXT:    xvpermi.q $xr7, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr6, $a7
-; LA64-NEXT:    xvextrins.b $xr0, $xr7, 85
-; LA64-NEXT:    xvori.b $xr8, $xr6, 0
-; LA64-NEXT:    xvpermi.q $xr8, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr7, $t8
-; LA64-NEXT:    xvextrins.b $xr0, $xr8, 102
-; LA64-NEXT:    xvori.b $xr9, $xr7, 0
-; LA64-NEXT:    xvpermi.q $xr9, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr8, $t7
-; LA64-NEXT:    xvextrins.b $xr0, $xr9, 119
-; LA64-NEXT:    xvori.b $xr10, $xr8, 0
-; LA64-NEXT:    xvpermi.q $xr10, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr9, $t6
-; LA64-NEXT:    xvextrins.b $xr0, $xr10, 136
-; LA64-NEXT:    xvori.b $xr11, $xr9, 0
-; LA64-NEXT:    xvpermi.q $xr11, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr10, $t5
-; LA64-NEXT:    xvextrins.b $xr0, $xr11, 153
-; LA64-NEXT:    xvori.b $xr12, $xr10, 0
-; LA64-NEXT:    xvpermi.q $xr12, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr11, $t4
-; LA64-NEXT:    xvextrins.b $xr0, $xr12, 170
-; LA64-NEXT:    xvori.b $xr13, $xr11, 0
-; LA64-NEXT:    xvpermi.q $xr13, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr12, $t3
-; LA64-NEXT:    xvextrins.b $xr0, $xr13, 187
-; LA64-NEXT:    xvori.b $xr14, $xr12, 0
-; LA64-NEXT:    xvpermi.q $xr14, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr13, $t2
-; LA64-NEXT:    xvextrins.b $xr0, $xr14, 204
-; LA64-NEXT:    xvori.b $xr15, $xr13, 0
-; LA64-NEXT:    xvpermi.q $xr15, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr14, $t1
-; LA64-NEXT:    xvextrins.b $xr0, $xr15, 221
-; LA64-NEXT:    xvori.b $xr16, $xr14, 0
-; LA64-NEXT:    xvpermi.q $xr16, $xr0, 18
-; LA64-NEXT:    xvreplgr2vr.b $xr15, $t0
-; LA64-NEXT:    xvextrins.b $xr0, $xr16, 238
-; LA64-NEXT:    xvori.b $xr16, $xr15, 0
-; LA64-NEXT:    xvpermi.q $xr16, $xr0, 18
-; LA64-NEXT:    xvextrins.b $xr0, $xr16, 255
-; LA64-NEXT:    xvreplgr2vr.b $xr16, $a1
-; LA64-NEXT:    xvpermi.q $xr16, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr16, 0
-; LA64-NEXT:    xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr1, 17
-; LA64-NEXT:    xvpermi.q $xr2, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr2, 34
-; LA64-NEXT:    xvpermi.q $xr3, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr3, 51
-; LA64-NEXT:    xvpermi.q $xr4, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr4, 68
-; LA64-NEXT:    xvpermi.q $xr5, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr5, 85
-; LA64-NEXT:    xvpermi.q $xr6, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr6, 102
-; LA64-NEXT:    xvpermi.q $xr7, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr7, 119
-; LA64-NEXT:    xvpermi.q $xr8, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr8, 136
-; LA64-NEXT:    xvpermi.q $xr9, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr9, 153
-; LA64-NEXT:    xvpermi.q $xr10, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr10, 170
-; LA64-NEXT:    xvpermi.q $xr11, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr11, 187
-; LA64-NEXT:    xvpermi.q $xr12, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr12, 204
-; LA64-NEXT:    xvpermi.q $xr13, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr13, 221
-; LA64-NEXT:    xvpermi.q $xr14, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr14, 238
-; LA64-NEXT:    xvpermi.q $xr15, $xr0, 48
-; LA64-NEXT:    xvextrins.b $xr0, $xr15, 255
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a3, 2
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a4, 3
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a5, 4
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a6, 5
+; LA64-NEXT:    vinsgr2vr.b $vr0, $a7, 6
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t8, 7
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t7, 8
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t6, 9
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t5, 10
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t4, 11
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t3, 12
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t2, 13
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t1, 14
+; LA64-NEXT:    vinsgr2vr.b $vr0, $t0, 15
+; LA64-NEXT:    xvreplve0.q $xr0, $xr0
 ; LA64-NEXT:    xvst $xr0, $a0, 0
 ; LA64-NEXT:    ret
 entry:
@@ -1124,100 +965,15 @@ define void @buildvector_v32i8_subseq_4(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; CHECK-LABEL: buildvector_v32i8_subseq_4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld.b $t0, $sp, 0
-; CHECK-NEXT:    xvreplgr2vr.b $xr1, $a2
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr2, $a3
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT:    xvori.b $xr4, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr3, $a4
-; CHECK-NEXT:    xvextrins.b $xr0, $xr4, 34
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr4, $a5
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT:    xvori.b $xr6, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr6, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr5, $a6
-; CHECK-NEXT:    xvextrins.b $xr0, $xr6, 68
-; CHECK-NEXT:    xvori.b $xr7, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr7, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr6, $a7
-; CHECK-NEXT:    xvextrins.b $xr0, $xr7, 85
-; CHECK-NEXT:    xvori.b $xr8, $xr6, 0
-; CHECK-NEXT:    xvpermi.q $xr8, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr7, $t0
-; CHECK-NEXT:    xvextrins.b $xr0, $xr8, 102
-; CHECK-NEXT:    xvori.b $xr9, $xr7, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr8, $a1
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 119
-; CHECK-NEXT:    xvori.b $xr9, $xr8, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 136
-; CHECK-NEXT:    xvori.b $xr9, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 153
-; CHECK-NEXT:    xvori.b $xr9, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 170
-; CHECK-NEXT:    xvori.b $xr9, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 187
-; CHECK-NEXT:    xvori.b $xr9, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 204
-; CHECK-NEXT:    xvori.b $xr9, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 221
-; CHECK-NEXT:    xvori.b $xr9, $xr6, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 238
-; CHECK-NEXT:    xvori.b $xr9, $xr7, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 255
-; CHECK-NEXT:    xvori.b $xr9, $xr8, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 0
-; CHECK-NEXT:    xvori.b $xr9, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 17
-; CHECK-NEXT:    xvori.b $xr9, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 34
-; CHECK-NEXT:    xvori.b $xr9, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 51
-; CHECK-NEXT:    xvori.b $xr9, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 68
-; CHECK-NEXT:    xvori.b $xr9, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 85
-; CHECK-NEXT:    xvori.b $xr9, $xr6, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 102
-; CHECK-NEXT:    xvori.b $xr9, $xr7, 0
-; CHECK-NEXT:    xvpermi.q $xr9, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr9, 119
-; CHECK-NEXT:    xvpermi.q $xr8, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr8, 136
-; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr1, 153
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr2, 170
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr4, 204
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 221
-; CHECK-NEXT:    xvpermi.q $xr6, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr6, 238
-; CHECK-NEXT:    xvpermi.q $xr7, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr7, 255
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 3
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a5, 4
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a6, 5
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a7, 6
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $t0, 7
+; CHECK-NEXT:    xvreplve0.d $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -1260,100 +1016,11 @@ entry:
 define void @buildvector_v32i8_subseq_8(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3) nounwind {
 ; CHECK-LABEL: buildvector_v32i8_subseq_8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvreplgr2vr.b $xr1, $a2
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr2, $a3
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT:    xvori.b $xr4, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr3, $a4
-; CHECK-NEXT:    xvextrins.b $xr0, $xr4, 34
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr4, $a1
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 68
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 85
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 102
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 119
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 136
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 153
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 170
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 187
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 204
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 221
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 238
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 255
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 0
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 17
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 34
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 51
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 68
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 85
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 102
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 119
-; CHECK-NEXT:    xvori.b $xr5, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 136
-; CHECK-NEXT:    xvori.b $xr5, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 153
-; CHECK-NEXT:    xvori.b $xr5, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 170
-; CHECK-NEXT:    xvori.b $xr5, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr5, 187
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr4, 204
-; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr1, 221
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr2, 238
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 255
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 3
+; CHECK-NEXT:    xvreplve0.w $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -1396,100 +1063,9 @@ entry:
 define void @buildvector_v32i8_subseq_16(ptr %dst, i8 %a0, i8 %a1) nounwind {
 ; CHECK-LABEL: buildvector_v32i8_subseq_16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvreplgr2vr.b $xr1, $a2
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 0
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.b $xr2, $a1
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 34
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 51
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 68
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 85
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 102
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 119
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 136
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 153
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 170
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 204
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 221
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 238
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 18
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 255
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 0
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 17
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 34
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 51
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 68
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 85
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 102
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 119
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 136
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 153
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 170
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 187
-; CHECK-NEXT:    xvori.b $xr3, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 204
-; CHECK-NEXT:    xvori.b $xr3, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr3, 221
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr2, 238
-; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT:    xvextrins.b $xr0, $xr1, 255
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT:    xvreplve0.h $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -1767,52 +1343,15 @@ define void @buildvector_v16i16_subseq_2(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i1
 ; CHECK-LABEL: buildvector_v16i16_subseq_2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld.h $t0, $sp, 0
-; CHECK-NEXT:    xvreplgr2vr.h $xr1, $a2
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 0
-; CHECK-NEXT:    xvori.b $xr2, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr3, $a3
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr4, $a4
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT:    xvori.b $xr2, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr5, $a5
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT:    xvori.b $xr2, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr6, $a6
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 68
-; CHECK-NEXT:    xvori.b $xr2, $xr6, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr7, $a7
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 85
-; CHECK-NEXT:    xvori.b $xr2, $xr7, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr8, $t0
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 102
-; CHECK-NEXT:    xvori.b $xr2, $xr8, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 119
-; CHECK-NEXT:    xvreplgr2vr.h $xr2, $a1
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 0
-; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr1, 17
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr3, 34
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr4, 51
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr5, 68
-; CHECK-NEXT:    xvpermi.q $xr6, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr6, 85
-; CHECK-NEXT:    xvpermi.q $xr7, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr7, 102
-; CHECK-NEXT:    xvpermi.q $xr8, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr8, 119
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 3
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a5, 4
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a6, 5
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a7, 6
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $t0, 7
+; CHECK-NEXT:    xvreplve0.q $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -1839,52 +1378,11 @@ entry:
 define void @buildvector_v16i16_subseq_4(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwind {
 ; CHECK-LABEL: buildvector_v16i16_subseq_4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvreplgr2vr.h $xr1, $a2
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 0
-; CHECK-NEXT:    xvori.b $xr2, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr3, $a3
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr4, $a4
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT:    xvori.b $xr2, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr5, $a1
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT:    xvori.b $xr2, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 68
-; CHECK-NEXT:    xvori.b $xr2, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 85
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 102
-; CHECK-NEXT:    xvori.b $xr2, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 18
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 119
-; CHECK-NEXT:    xvori.b $xr2, $xr5, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 0
-; CHECK-NEXT:    xvori.b $xr2, $xr1, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 17
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 34
-; CHECK-NEXT:    xvori.b $xr2, $xr4, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr2, 51
-; CHECK-NEXT:    xvpermi.q $xr5, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr5, 68
-; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr1, 85
-; CHECK-NEXT:    xvpermi.q $xr3, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr3, 102
-; CHECK-NEXT:    xvpermi.q $xr4, $xr0, 48
-; CHECK-NEXT:    xvextrins.h $xr0, $xr4, 119
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 3
+; CHECK-NEXT:    xvreplve0.d $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -1911,53 +1409,10 @@ entry:
 define void @buildvector_v16i16_subseq_8(ptr %dst, i16 %a0, i16 %a1) nounwind {
 ; CHECK-LABEL: buildvector_v16i16_subseq_8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvreplgr2vr.h $xr0, $a2
-; CHECK-NEXT:    vinsgr2vr.h $vr1, $a1, 0
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvreplgr2vr.h $xr3, $a1
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 17
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 34
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 51
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 68
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 85
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 102
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 18
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 119
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 0
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 17
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 34
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 51
-; CHECK-NEXT:    xvori.b $xr2, $xr3, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 68
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvpermi.q $xr2, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr2, 85
-; CHECK-NEXT:    xvpermi.q $xr3, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr3, 102
-; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 48
-; CHECK-NEXT:    xvextrins.h $xr1, $xr0, 119
-; CHECK-NEXT:    xvst $xr1, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
+; CHECK-NEXT:    xvreplve0.w $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0  = insertelement <16 x i16> undef,  i16 %a0, i32 0
@@ -2055,14 +1510,11 @@ entry:
 define void @buildvector_v8i32_subseq_2(ptr %dst, i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
 ; CHECK-LABEL: buildvector_v8i32_subseq_2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 1
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a3, 2
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a4, 3
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 5
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a3, 6
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a4, 7
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a2, 1
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a3, 2
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a4, 3
+; CHECK-NEXT:    xvreplve0.q $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -2081,14 +1533,9 @@ entry:
 define void @buildvector_v8i32_subseq_4(ptr %dst, i32 %a0, i32 %a1) nounwind {
 ; CHECK-LABEL: buildvector_v8i32_subseq_4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 1
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 2
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 3
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 5
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a1, 6
-; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a2, 7
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a2, 1
+; CHECK-NEXT:    xvreplve0.d $xr0, $xr0
 ; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -2191,23 +1638,19 @@ entry:
 define void @buildvector_v4i64_subseq_2(ptr %dst, i64 %a0, i64 %a1) nounwind {
 ; LA32-LABEL: buildvector_v4i64_subseq_2:
 ; LA32:       # %bb.0: # %entry
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 1
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a3, 2
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a4, 3
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a1, 4
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a2, 5
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a3, 6
-; LA32-NEXT:    xvinsgr2vr.w $xr0, $a4, 7
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 1
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a3, 2
+; LA32-NEXT:    vinsgr2vr.w $vr0, $a4, 3
+; LA32-NEXT:    xvreplve0.q $xr0, $xr0
 ; LA32-NEXT:    xvst $xr0, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: buildvector_v4i64_subseq_2:
 ; LA64:       # %bb.0: # %entry
-; LA64-NEXT:    xvinsgr2vr.d $xr0, $a1, 0
-; LA64-NEXT:    xvinsgr2vr.d $xr0, $a2, 1
-; LA64-NEXT:    xvinsgr2vr.d $xr0, $a1, 2
-; LA64-NEXT:    xvinsgr2vr.d $xr0, $a2, 3
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a1, 0
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a2, 1
+; LA64-NEXT:    xvreplve0.q $xr0, $xr0
 ; LA64-NEXT:    xvst $xr0, $a0, 0
 ; LA64-NEXT:    ret
 entry:
@@ -2309,19 +1752,15 @@ entry:
 define void @buildvector_v8f32_subseq_2(ptr %dst, float %a0, float %a1, float %a2, float %a3) nounwind {
 ; CHECK-LABEL: buildvector_v8f32_subseq_2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f3 killed $f3 def $xr3
-; CHECK-NEXT:    # kill: def $f2 killed $f2 def $xr2
-; CHECK-NEXT:    # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT:    # kill: def $f3 killed $f3 def $vr3
+; CHECK-NEXT:    # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT:    # kill: def $f1 killed $f1 def $vr1
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT:    xvori.b $xr4, $xr0, 0
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr1, 1
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr2, 2
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr3, 3
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr0, 4
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr1, 5
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr2, 6
-; CHECK-NEXT:    xvinsve0.w $xr4, $xr3, 7
-; CHECK-NEXT:    xvst $xr4, $a0, 0
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT:    vextrins.w $vr0, $vr2, 32
+; CHECK-NEXT:    vextrins.w $vr0, $vr3, 48
+; CHECK-NEXT:    xvreplve0.q $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <8 x float> undef, float %a0, i32 0
@@ -2339,17 +1778,11 @@ entry:
 define void @buildvector_v8f32_subseq_4(ptr %dst, float %a0, float %a1) nounwind {
 ; CHECK-LABEL: buildvector_v8f32_subseq_4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT:    # kill: def $f1 killed $f1 def $vr1
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr1, 1
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr0, 2
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr1, 3
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr0, 4
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr1, 5
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr0, 6
-; CHECK-NEXT:    xvinsve0.w $xr2, $xr1, 7
-; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT:    xvreplve0.d $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <8 x float> undef, float %a0, i32 0
@@ -2424,13 +1857,11 @@ entry:
 define void @buildvector_v4f64_subseq_2(ptr %dst, double %a0, double %a1) nounwind {
 ; CHECK-LABEL: buildvector_v4f64_subseq_2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $f1_64 killed $f1_64 def $xr1
+; CHECK-NEXT:    # kill: def $f1_64 killed $f1_64 def $vr1
 ; CHECK-NEXT:    # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT:    xvori.b $xr2, $xr0, 0
-; CHECK-NEXT:    xvinsve0.d $xr2, $xr1, 1
-; CHECK-NEXT:    xvinsve0.d $xr2, $xr0, 2
-; CHECK-NEXT:    xvinsve0.d $xr2, $xr1, 3
-; CHECK-NEXT:    xvst $xr2, $a0, 0
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT:    xvreplve0.q $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <4 x double> undef, double %a0, i32 0
diff --git a/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll b/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
index 349684ff22be2..41c38b01ce98e 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/broadcast-load.ll
@@ -11,8 +11,7 @@ define <2 x i64> @should_not_be_optimized(ptr %ptr, ptr %dst){
 ; LA32-NEXT:    st.w $a2, $a1, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 2
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; LA32-NEXT:    st.w $a0, $a1, 4
 ; LA32-NEXT:    ret
 ;
@@ -62,8 +61,7 @@ define <2 x i64> @vldrepl_d_unaligned_offset(ptr %ptr) {
 ; LA32-NEXT:    ld.w $a0, $a0, 8
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: vldrepl_d_unaligned_offset:
@@ -155,8 +153,7 @@ define <2 x i64> @vldrepl_d(ptr %ptr) {
 ; LA32-NEXT:    ld.w $a0, $a0, 4
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: vldrepl_d:
@@ -176,8 +173,7 @@ define <2 x i64> @vldrepl_d_offset(ptr %ptr) {
 ; LA32-NEXT:    ld.w $a0, $a0, 268
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 1
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; LA32-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: vldrepl_d_offset:
diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
index 24df71c2ad71b..f27eeb9e4a355 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
@@ -46,8 +46,7 @@ define void @buildvector_v2i64_splat(ptr %dst, i64 %a0) nounwind {
 ; LA32:       # %bb.0: # %entry
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 0
 ; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 1
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; LA32-NEXT:    vinsgr2vr.w $vr0, $a2, 3
+; LA32-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; LA32-NEXT:    vst $vr0, $a0, 0
 ; LA32-NEXT:    ret
 ;
@@ -397,14 +396,7 @@ define void @buildvector_v16i8_subseq_2(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a6, 5
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a7, 6
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $t0, 7
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 10
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 11
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a5, 12
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a6, 13
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a7, 14
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $t0, 15
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -435,18 +427,7 @@ define void @buildvector_v16i8_subseq_4(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 2
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 3
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 4
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 5
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 6
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 7
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 10
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 11
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 12
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 13
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a3, 14
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a4, 15
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -475,20 +456,7 @@ define void @buildvector_v16i8_subseq_8(ptr %dst, i8 %a0, i8 %a1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 0
 ; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 1
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 2
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 3
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 4
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 5
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 6
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 7
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 8
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 9
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 10
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 11
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 12
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 13
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a1, 14
-; CHECK-NEXT:    vinsgr2vr.b $vr0, $a2, 15
+; CHECK-NEXT:    vreplvei.h $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -591,10 +559,7 @@ define void @buildvector_v8i16_subseq_2(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i16
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 2
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 3
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 4
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 5
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a3, 6
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a4, 7
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -615,12 +580,7 @@ define void @buildvector_v8i16_subseq_4(ptr %dst, i16 %a0, i16 %a1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 1
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 2
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 3
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 4
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 5
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a1, 6
-; CHECK-NEXT:    vinsgr2vr.h $vr0, $a2, 7
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -693,8 +653,7 @@ define void @buildvector_v4i32_subseq_2(ptr %dst, i32 %a0, i32 %a1) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a2, 1
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a1, 2
-; CHECK-NEXT:    vinsgr2vr.w $vr0, $a2, 3
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
 ; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
@@ -835,11 +794,9 @@ define void @buildvector_v4f32_subseq_2(ptr %dst, float %a0, float %a1) nounwind
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    # kill: def $f1 killed $f1 def $vr1
 ; CHECK-NEXT:    # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT:    vori.b $vr2, $vr0, 0
-; CHECK-NEXT:    vextrins.w $vr2, $vr1, 16
-; CHECK-NEXT:    vextrins.w $vr2, $vr0, 32
-; CHECK-NEXT:    vextrins.w $vr2, $vr1, 48
-; CHECK-NEXT:    vst $vr2, $a0, 0
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %ins0 = insertelement <4 x float> undef, float %a0, i32 0



More information about the llvm-commits mailing list