[llvm] [LoongArch] Split 256-bit build_vector to avoid using LASX element insertion (PR #154918)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 22 01:10:16 PDT 2025
https://github.com/zhaoqi5 updated https://github.com/llvm/llvm-project/pull/154918
>From 13fae6b32749ce7943b4fa18fc8430b493a86af0 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Fri, 22 Aug 2025 17:40:01 +0800
Subject: [PATCH 1/6] [LoongArch] Split 256-bit build_vector to avoid using
LASX element insertion
Note: Splitting is only worse for v8i32/v8f32/v4i64/v4f64 types when the
high part has only one non-undef element.
---
.../LoongArch/LoongArchISelLowering.cpp | 43 ++++++++++++++-----
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index dad5758f8d054..5cde8ac20a161 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2805,24 +2805,47 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
return DAG.getBitcast(ResTy, SplatVec);
}
- // Use INSERT_VECTOR_ELT operations rather than expand to stores.
- // The resulting code is the same length as the expansion, but it doesn't
- // use memory operations.
- assert(ResTy.isVector());
+ // Use INSERT_VECTOR_ELT operations rather than expand to stores, because
+  // using memory operations is much slower.
+ EVT VecTy = ResTy;
+ unsigned VecNumElts = NumElts;
+
+  // Split the 256-bit vector in half, fill the two halves separately, then
+  // concat them to get the result vector.
+ if (Is256Vec) {
+ VecTy = ResTy.getHalfNumVectorElementsVT(*DAG.getContext());
+ VecNumElts = NumElts / 2;
+ }
+ SDValue Vector = DAG.getUNDEF(VecTy);
SDValue Op0 = Node->getOperand(0);
- SDValue Vector = DAG.getUNDEF(ResTy);
-
if (!Op0.isUndef())
- Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Op0);
- for (unsigned i = 1; i < NumElts; ++i) {
+ Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecTy, Op0);
+ for (unsigned i = 1; i < VecNumElts; ++i) {
SDValue Opi = Node->getOperand(i);
if (Opi.isUndef())
continue;
- Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Opi,
+ Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecTy, Vector, Opi,
DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
}
- return Vector;
+
+ if (Is128Vec)
+ return Vector;
+
+ SDValue VectorHi = DAG.getUNDEF(VecTy);
+ SDValue OpHi0 = Node->getOperand(VecNumElts);
+ if (!OpHi0.isUndef())
+ VectorHi = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecTy, OpHi0);
+ for (unsigned i = VecNumElts + 1; i < NumElts; ++i) {
+ SDValue Opi = Node->getOperand(i);
+ if (Opi.isUndef())
+ continue;
+ VectorHi = DAG.getNode(
+ ISD::INSERT_VECTOR_ELT, DL, VecTy, VectorHi, Opi,
+ DAG.getConstant(i - VecNumElts, DL, Subtarget.getGRLenVT()));
+ }
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResTy, Vector, VectorHi);
}
return SDValue();
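To make the strategy above concrete: the patch lowers a 256-bit build_vector
by filling two 128-bit (LSX) halves element by element and concatenating
them, rather than inserting into the 256-bit register lane by lane. Below is
a minimal standalone C++ sketch of that approach; it is not LLVM code,
std::optional stands in for an undef operand, and buildHalf is an
illustrative name.

#include <array>
#include <cstdio>
#include <optional>

using Half = std::array<int, 4>; // one 128-bit half, four i32 lanes

// Fill one half lane by lane, skipping undef operands, mirroring the
// SCALAR_TO_VECTOR + INSERT_VECTOR_ELT loop in the patch.
static Half buildHalf(const std::optional<int> (&Ops)[4]) {
  Half V{}; // untouched lanes stay "undef" (zero here, for printing)
  for (unsigned i = 0; i < 4; ++i)
    if (Ops[i])
      V[i] = *Ops[i];
  return V;
}

int main() {
  // An 8 x i32 build_vector with two undef operands.
  std::optional<int> Ops[8] = {1, 2, std::nullopt, 4,
                               5, std::nullopt, 7, 8};
  std::optional<int> Lo[4] = {Ops[0], Ops[1], Ops[2], Ops[3]};
  std::optional<int> Hi[4] = {Ops[4], Ops[5], Ops[6], Ops[7]};
  // Build each half independently, then "concat" them, as the final
  // CONCAT_VECTORS node does for the 256-bit result.
  Half L = buildHalf(Lo), H = buildHalf(Hi);
  for (int v : L)
    std::printf("%d ", v);
  for (int v : H)
    std::printf("%d ", v);
  std::printf("\n");
}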
>From 993237bc26eaedcad0a25ec50cbc0a51f1b00bb1 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Sat, 23 Aug 2025 14:49:23 +0800
Subject: [PATCH 2/6] update
---
.../LoongArch/LoongArchISelLowering.cpp | 102 +++++++++---------
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 5cde8ac20a161..7e1912174a247 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2647,6 +2647,43 @@ static SDValue lowerBUILD_VECTORAsBroadCastLoad(BuildVectorSDNode *BVOp,
return SDValue();
}
+// Sequentially insert elements from Ops into Vector, from low to high indices.
+// Note: Ops can have fewer elements than Vector.
+static void fillVector(ArrayRef<SDValue> Ops, SelectionDAG &DAG, SDLoc DL,
+ const LoongArchSubtarget &Subtarget, SDValue &Vector,
+ EVT ResTy) {
+ assert(Ops.size() <= ResTy.getVectorNumElements());
+
+ SDValue Op0 = Ops[0];
+ if (!Op0.isUndef())
+ Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ResTy, Op0);
+ for (unsigned i = 1; i < Ops.size(); ++i) {
+ SDValue Opi = Ops[i];
+ if (Opi.isUndef())
+ continue;
+ Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, Opi,
+ DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+ }
+}
+
+// Build a ResTy subvector from Node, taking NumElts elements starting at index
+// 'first'.
+static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node,
+ SelectionDAG &DAG, SDLoc DL,
+ const LoongArchSubtarget &Subtarget,
+ EVT ResTy, unsigned first) {
+ unsigned NumElts = ResTy.getVectorNumElements();
+
+  assert(first + NumElts <=
+         Node->getSimpleValueType(0).getVectorNumElements());
+
+ SmallVector<SDValue, 16> Ops(Node->op_begin() + first,
+ Node->op_begin() + first + NumElts);
+ SDValue Vector = DAG.getUNDEF(ResTy);
+ fillVector(Ops, DAG, DL, Subtarget, Vector, ResTy);
+ return Vector;
+}
+
SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
@@ -2756,29 +2793,18 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
BitVector UndefElements;
if (Node->getRepeatedSequence(Sequence, &UndefElements) &&
UndefElements.count() == 0) {
- SDValue Vector = DAG.getUNDEF(ResTy);
- SDValue FillVec = Vector;
- EVT FillTy = ResTy;
-
// Using LSX instructions to fill the sub-sequence of 256-bits vector,
// because the high part can be simply treated as undef.
- if (Is256Vec) {
- FillTy = ResTy.getHalfNumVectorElementsVT(*DAG.getContext());
- FillVec = DAG.getExtractSubvector(DL, FillTy, Vector, 0);
- }
+ SDValue Vector = DAG.getUNDEF(ResTy);
+ EVT FillTy = Is256Vec
+ ? ResTy.getHalfNumVectorElementsVT(*DAG.getContext())
+ : ResTy;
+ SDValue FillVec =
+ Is256Vec ? DAG.getExtractSubvector(DL, FillTy, Vector, 0) : Vector;
- SDValue Op0 = Sequence[0];
- unsigned SeqLen = Sequence.size();
- if (!Op0.isUndef())
- FillVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, FillTy, Op0);
- for (unsigned i = 1; i < SeqLen; ++i) {
- SDValue Opi = Sequence[i];
- if (Opi.isUndef())
- continue;
- FillVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, FillTy, FillVec, Opi,
- DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
- }
+ fillVector(Sequence, DAG, DL, Subtarget, FillVec, FillTy);
+ unsigned SeqLen = Sequence.size();
unsigned SplatLen = NumElts / SeqLen;
MVT SplatEltTy = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
MVT SplatTy = MVT::getVectorVT(SplatEltTy, SplatLen);
@@ -2807,43 +2833,19 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
// Use INSERT_VECTOR_ELT operations rather than expand to stores, because
   // using memory operations is much slower.
- EVT VecTy = ResTy;
- unsigned VecNumElts = NumElts;
-
+ //
   // Split the 256-bit vector in half, fill the two halves separately, then
   // concat them to get the result vector.
- if (Is256Vec) {
- VecTy = ResTy.getHalfNumVectorElementsVT(*DAG.getContext());
- VecNumElts = NumElts / 2;
- }
-
- SDValue Vector = DAG.getUNDEF(VecTy);
- SDValue Op0 = Node->getOperand(0);
- if (!Op0.isUndef())
- Vector = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecTy, Op0);
- for (unsigned i = 1; i < VecNumElts; ++i) {
- SDValue Opi = Node->getOperand(i);
- if (Opi.isUndef())
- continue;
- Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecTy, Vector, Opi,
- DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
- }
+ EVT VecTy =
+ Is256Vec ? ResTy.getHalfNumVectorElementsVT(*DAG.getContext()) : ResTy;
+ SDValue Vector =
+ fillSubVectorFromBuildVector(Node, DAG, DL, Subtarget, VecTy, 0);
if (Is128Vec)
return Vector;
- SDValue VectorHi = DAG.getUNDEF(VecTy);
- SDValue OpHi0 = Node->getOperand(VecNumElts);
- if (!OpHi0.isUndef())
- VectorHi = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecTy, OpHi0);
- for (unsigned i = VecNumElts + 1; i < NumElts; ++i) {
- SDValue Opi = Node->getOperand(i);
- if (Opi.isUndef())
- continue;
- VectorHi = DAG.getNode(
- ISD::INSERT_VECTOR_ELT, DL, VecTy, VectorHi, Opi,
- DAG.getConstant(i - VecNumElts, DL, Subtarget.getGRLenVT()));
- }
+ SDValue VectorHi = fillSubVectorFromBuildVector(Node, DAG, DL, Subtarget,
+ VecTy, NumElts / 2);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResTy, Vector, VectorHi);
}
>From 45a0adba650bb67913bd6f7f9af772b2094226bc Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Sat, 23 Aug 2025 16:49:37 +0800
Subject: [PATCH 3/6] update
---
.../LoongArch/LoongArchISelLowering.cpp | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 7e1912174a247..26f89a4e0dc02 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2834,8 +2834,23 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
// Use INSERT_VECTOR_ELT operations rather than expand to stores, because
   // using memory operations is much slower.
//
-  // Split the 256-bit vector in half, fill the two halves separately, then
-  // concat them to get the result vector.
+ // For 256-bit vectors, normally spill into two halves and concatenate.
+ // Special case: for v8i32/v8f32/v4i64/v4f64, if the upper half has only
+ // one non-undef element, skip spilling to avoid a worse result.
+ if (ResTy == MVT::v8i32 || ResTy == MVT::v8f32 || ResTy == MVT::v4i64 ||
+ ResTy == MVT::v4f64) {
+ unsigned NonUndefCount = 0;
+ for (unsigned i = NumElts / 2; i < NumElts; ++i) {
+ if (!Node->getOperand(i).isUndef()) {
+ ++NonUndefCount;
+ if (NonUndefCount > 1)
+ break;
+ }
+ }
+ if (NonUndefCount == 1)
+ return fillSubVectorFromBuildVector(Node, DAG, DL, Subtarget, ResTy, 0);
+ }
+
EVT VecTy =
Is256Vec ? ResTy.getHalfNumVectorElementsVT(*DAG.getContext()) : ResTy;
SDValue Vector =
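The special case above only fires for v8i32/v8f32/v4i64/v4f64, i.e. the
element widths for which LASX has a direct 256-bit insertion (xvinsgr2vr.w/.d
for integers, xvinsve0.w/.d for floats): if the upper half contributes exactly
one element, a single such insertion is cheaper than building and
concatenating a second 128-bit half. A standalone sketch of the counting
heuristic, with std::optional again standing in for an undef operand and an
illustrative helper name:

#include <cstddef>
#include <optional>
#include <vector>

using Lane = std::optional<int>; // nullopt models an undef operand

// Returns true when exactly one lane in the upper half is defined,
// mirroring the NonUndefCount loop in the patch, with the same early
// exit once a second defined lane is found.
static bool upperHalfHasSingleElt(const std::vector<Lane> &Ops) {
  unsigned NonUndefCount = 0;
  for (std::size_t i = Ops.size() / 2; i < Ops.size(); ++i)
    if (Ops[i] && ++NonUndefCount > 1)
      break; // two or more: splitting wins, stop counting
  return NonUndefCount == 1;
}

int main() {
  // Upper half {5, undef, undef, undef}: keep the unsplit lowering.
  std::vector<Lane> V = {1, 2, 3, 4,
                         5, std::nullopt, std::nullopt, std::nullopt};
  return upperHalfHasSingleElt(V) ? 0 : 1;
}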
>From 7bf017ff84baec46b65b3777350d3fa706d1fdd1 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Sat, 23 Aug 2025 17:19:42 +0800
Subject: [PATCH 4/6] fix typo
---
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 26f89a4e0dc02..6e12446515d5b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2834,9 +2834,9 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
// Use INSERT_VECTOR_ELT operations rather than expand to stores, because
   // using memory operations is much slower.
//
- // For 256-bit vectors, normally spill into two halves and concatenate.
+ // For 256-bit vectors, normally split into two halves and concatenate.
// Special case: for v8i32/v8f32/v4i64/v4f64, if the upper half has only
- // one non-undef element, skip spilling to avoid a worse result.
+  // one non-undef element, skip splitting to avoid a worse result.
if (ResTy == MVT::v8i32 || ResTy == MVT::v8f32 || ResTy == MVT::v4i64 ||
ResTy == MVT::v4f64) {
unsigned NonUndefCount = 0;
>From 580c2ad42a142f6801129a0b72684d1d26620482 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 2 Sep 2025 11:21:19 +0800
Subject: [PATCH 5/6] la32 passed
---
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 6e12446515d5b..4b78b14a2a89d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -1978,9 +1978,8 @@ lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
0)) {
- APInt Imm(64, SplatIndex);
return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1,
- DAG.getConstant(Imm, DL, Subtarget.getGRLenVT()));
+ DAG.getConstant(SplatIndex, DL, Subtarget.getGRLenVT()));
}
return SDValue();
>From fa11be322e11e60c15e1a2365c32e6da6bbfbd60 Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 2 Sep 2025 11:22:11 +0800
Subject: [PATCH 6/6] update tests
---
.../test/CodeGen/LoongArch/lasx/bitreverse.ll | 72 +-
.../CodeGen/LoongArch/lasx/build-vector.ll | 821 ++++++------------
llvm/test/CodeGen/LoongArch/lasx/fpowi.ll | 139 ++-
.../lasx/ir-instruction/fix-xvshuf.ll | 13 +-
.../lasx/ir-instruction/insertelement.ll | 10 +-
.../LoongArch/lasx/scalar-to-vector.ll | 8 +-
6 files changed, 395 insertions(+), 668 deletions(-)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll b/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll
index 11f1bce55fad6..87ee4ad025395 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll
@@ -7,18 +7,19 @@ declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>)
define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: test_bitreverse_v32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT: bitrev.8b $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 0
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
-; CHECK-NEXT: bitrev.8b $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 1
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
; CHECK-NEXT: bitrev.8b $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 2
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
; CHECK-NEXT: bitrev.8b $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 3
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
+; CHECK-NEXT: bitrev.8b $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT: bitrev.8b $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1
+; CHECK-NEXT: xvpermi.q $xr1, $xr2, 2
; CHECK-NEXT: xvori.b $xr0, $xr1, 0
; CHECK-NEXT: ret
%b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
@@ -30,19 +31,20 @@ declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: test_bitreverse_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 0
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 1
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 2
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 3
-; CHECK-NEXT: xvshuf4i.h $xr0, $xr1, 27
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1
+; CHECK-NEXT: xvpermi.q $xr2, $xr1, 2
+; CHECK-NEXT: xvshuf4i.h $xr0, $xr2, 27
; CHECK-NEXT: ret
%b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
ret <16 x i16> %b
@@ -53,19 +55,20 @@ declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
; CHECK-LABEL: test_bitreverse_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 0
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 1
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 2
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 3
-; CHECK-NEXT: xvshuf4i.w $xr0, $xr1, 177
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1
+; CHECK-NEXT: xvpermi.q $xr2, $xr1, 2
+; CHECK-NEXT: xvshuf4i.w $xr0, $xr2, 177
; CHECK-NEXT: ret
%b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
ret <8 x i32> %b
@@ -76,18 +79,19 @@ declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
; CHECK-LABEL: test_bitreverse_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 0
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
-; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 1
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 2
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
; CHECK-NEXT: bitrev.d $a0, $a0
-; CHECK-NEXT: xvinsgr2vr.d $xr1, $a0, 3
+; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT: bitrev.d $a0, $a0
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1
+; CHECK-NEXT: xvpermi.q $xr1, $xr2, 2
; CHECK-NEXT: xvori.b $xr0, $xr1, 0
; CHECK-NEXT: ret
%b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index 52bc1b2696085..7575bc1a9d3d2 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -298,286 +298,128 @@ entry:
define void @buildvector_v32i8(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) nounwind {
; LA32-LABEL: buildvector_v32i8:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: addi.w $sp, $sp, -80
-; LA32-NEXT: fst.d $fs0, $sp, 72 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs1, $sp, 64 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs2, $sp, 56 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs3, $sp, 48 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs4, $sp, 40 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs5, $sp, 32 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs6, $sp, 24 # 8-byte Folded Spill
-; LA32-NEXT: fst.d $fs7, $sp, 16 # 8-byte Folded Spill
-; LA32-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a2
-; LA32-NEXT: xvreplgr2vr.b $xr2, $a3
-; LA32-NEXT: xvreplgr2vr.b $xr3, $a4
-; LA32-NEXT: ld.b $a1, $sp, 172
-; LA32-NEXT: xvreplgr2vr.b $xr4, $a5
-; LA32-NEXT: ld.b $a2, $sp, 80
-; LA32-NEXT: xvreplgr2vr.b $xr5, $a6
-; LA32-NEXT: ld.b $a3, $sp, 84
-; LA32-NEXT: xvreplgr2vr.b $xr6, $a7
-; LA32-NEXT: ld.b $a4, $sp, 88
-; LA32-NEXT: xvreplgr2vr.b $xr7, $a2
-; LA32-NEXT: ld.b $a2, $sp, 92
-; LA32-NEXT: xvreplgr2vr.b $xr8, $a3
-; LA32-NEXT: ld.b $a3, $sp, 96
-; LA32-NEXT: xvreplgr2vr.b $xr9, $a4
-; LA32-NEXT: ld.b $a4, $sp, 100
-; LA32-NEXT: xvreplgr2vr.b $xr10, $a2
-; LA32-NEXT: ld.b $a2, $sp, 104
-; LA32-NEXT: xvreplgr2vr.b $xr11, $a3
-; LA32-NEXT: ld.b $a3, $sp, 108
-; LA32-NEXT: xvreplgr2vr.b $xr12, $a4
-; LA32-NEXT: ld.b $a4, $sp, 112
-; LA32-NEXT: xvreplgr2vr.b $xr13, $a2
-; LA32-NEXT: ld.b $a2, $sp, 116
-; LA32-NEXT: xvreplgr2vr.b $xr14, $a3
-; LA32-NEXT: ld.b $a3, $sp, 120
-; LA32-NEXT: xvreplgr2vr.b $xr15, $a4
-; LA32-NEXT: ld.b $a4, $sp, 124
-; LA32-NEXT: xvreplgr2vr.b $xr16, $a2
-; LA32-NEXT: ld.b $a2, $sp, 128
-; LA32-NEXT: xvreplgr2vr.b $xr17, $a3
-; LA32-NEXT: ld.b $a3, $sp, 132
-; LA32-NEXT: xvreplgr2vr.b $xr18, $a4
-; LA32-NEXT: ld.b $a4, $sp, 136
-; LA32-NEXT: xvreplgr2vr.b $xr19, $a2
-; LA32-NEXT: ld.b $a2, $sp, 140
-; LA32-NEXT: xvreplgr2vr.b $xr20, $a3
-; LA32-NEXT: ld.b $a3, $sp, 144
-; LA32-NEXT: xvreplgr2vr.b $xr21, $a4
-; LA32-NEXT: ld.b $a4, $sp, 148
-; LA32-NEXT: xvreplgr2vr.b $xr22, $a2
-; LA32-NEXT: ld.b $a2, $sp, 152
-; LA32-NEXT: xvreplgr2vr.b $xr23, $a3
-; LA32-NEXT: ld.b $a3, $sp, 156
-; LA32-NEXT: xvreplgr2vr.b $xr24, $a4
-; LA32-NEXT: ld.b $a4, $sp, 160
-; LA32-NEXT: xvreplgr2vr.b $xr25, $a2
-; LA32-NEXT: ld.b $a2, $sp, 164
-; LA32-NEXT: xvreplgr2vr.b $xr26, $a3
-; LA32-NEXT: ld.b $a3, $sp, 168
-; LA32-NEXT: xvreplgr2vr.b $xr27, $a4
-; LA32-NEXT: ld.b $a4, $sp, 176
-; LA32-NEXT: xvreplgr2vr.b $xr28, $a2
-; LA32-NEXT: xvreplgr2vr.b $xr29, $a3
-; LA32-NEXT: xvreplgr2vr.b $xr30, $a1
-; LA32-NEXT: xvreplgr2vr.b $xr31, $a4
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA32-NEXT: xvpermi.q $xr2, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr2, 34
-; LA32-NEXT: xvpermi.q $xr3, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr3, 51
-; LA32-NEXT: xvpermi.q $xr4, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr4, 68
-; LA32-NEXT: xvpermi.q $xr5, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr5, 85
-; LA32-NEXT: xvpermi.q $xr6, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr6, 102
-; LA32-NEXT: xvpermi.q $xr7, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr7, 119
-; LA32-NEXT: xvpermi.q $xr8, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr8, 136
-; LA32-NEXT: xvpermi.q $xr9, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr9, 153
-; LA32-NEXT: xvpermi.q $xr10, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr10, 170
-; LA32-NEXT: xvpermi.q $xr11, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr11, 187
-; LA32-NEXT: xvpermi.q $xr12, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr12, 204
-; LA32-NEXT: xvpermi.q $xr13, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr13, 221
-; LA32-NEXT: xvpermi.q $xr14, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr14, 238
-; LA32-NEXT: xvpermi.q $xr15, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr15, 255
-; LA32-NEXT: xvpermi.q $xr16, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr16, 0
-; LA32-NEXT: xvpermi.q $xr17, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr17, 17
-; LA32-NEXT: xvpermi.q $xr18, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr18, 34
-; LA32-NEXT: xvpermi.q $xr19, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr19, 51
-; LA32-NEXT: xvpermi.q $xr20, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr20, 68
-; LA32-NEXT: xvpermi.q $xr21, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr21, 85
-; LA32-NEXT: xvpermi.q $xr22, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr22, 102
-; LA32-NEXT: xvpermi.q $xr23, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr23, 119
-; LA32-NEXT: xvpermi.q $xr24, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr24, 136
-; LA32-NEXT: xvpermi.q $xr25, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr25, 153
-; LA32-NEXT: xvpermi.q $xr26, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr26, 170
-; LA32-NEXT: xvpermi.q $xr27, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr27, 187
-; LA32-NEXT: xvpermi.q $xr28, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr28, 204
-; LA32-NEXT: xvpermi.q $xr29, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr29, 221
-; LA32-NEXT: xvpermi.q $xr30, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr30, 238
-; LA32-NEXT: xvpermi.q $xr31, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr31, 255
-; LA32-NEXT: xvst $xr0, $a0, 0
-; LA32-NEXT: fld.d $fs7, $sp, 16 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs6, $sp, 24 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs5, $sp, 32 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs4, $sp, 40 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs3, $sp, 48 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs2, $sp, 56 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs1, $sp, 64 # 8-byte Folded Reload
-; LA32-NEXT: fld.d $fs0, $sp, 72 # 8-byte Folded Reload
-; LA32-NEXT: addi.w $sp, $sp, 80
+; LA32-NEXT: ld.b $t0, $sp, 36
+; LA32-NEXT: ld.b $t1, $sp, 92
+; LA32-NEXT: ld.b $t2, $sp, 40
+; LA32-NEXT: ld.b $t3, $sp, 44
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 0
+; LA32-NEXT: ld.b $t0, $sp, 48
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 1
+; LA32-NEXT: ld.b $t2, $sp, 52
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 2
+; LA32-NEXT: ld.b $t3, $sp, 56
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 3
+; LA32-NEXT: ld.b $t0, $sp, 60
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 4
+; LA32-NEXT: ld.b $t2, $sp, 64
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 5
+; LA32-NEXT: ld.b $t3, $sp, 68
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 6
+; LA32-NEXT: ld.b $t0, $sp, 72
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 7
+; LA32-NEXT: ld.b $t2, $sp, 76
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 8
+; LA32-NEXT: ld.b $t3, $sp, 80
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 9
+; LA32-NEXT: ld.b $t0, $sp, 84
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 10
+; LA32-NEXT: ld.b $t2, $sp, 88
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 11
+; LA32-NEXT: ld.b $t3, $sp, 96
+; LA32-NEXT: vinsgr2vr.b $vr0, $t0, 12
+; LA32-NEXT: vinsgr2vr.b $vr0, $t2, 13
+; LA32-NEXT: vinsgr2vr.b $vr0, $t1, 14
+; LA32-NEXT: vinsgr2vr.b $vr0, $t3, 15
+; LA32-NEXT: vinsgr2vr.b $vr1, $a1, 0
+; LA32-NEXT: vinsgr2vr.b $vr1, $a2, 1
+; LA32-NEXT: vinsgr2vr.b $vr1, $a3, 2
+; LA32-NEXT: vinsgr2vr.b $vr1, $a4, 3
+; LA32-NEXT: ld.b $a1, $sp, 28
+; LA32-NEXT: vinsgr2vr.b $vr1, $a5, 4
+; LA32-NEXT: ld.b $a2, $sp, 0
+; LA32-NEXT: vinsgr2vr.b $vr1, $a6, 5
+; LA32-NEXT: ld.b $a3, $sp, 4
+; LA32-NEXT: vinsgr2vr.b $vr1, $a7, 6
+; LA32-NEXT: ld.b $a4, $sp, 8
+; LA32-NEXT: vinsgr2vr.b $vr1, $a2, 7
+; LA32-NEXT: ld.b $a2, $sp, 12
+; LA32-NEXT: vinsgr2vr.b $vr1, $a3, 8
+; LA32-NEXT: ld.b $a3, $sp, 16
+; LA32-NEXT: vinsgr2vr.b $vr1, $a4, 9
+; LA32-NEXT: ld.b $a4, $sp, 20
+; LA32-NEXT: vinsgr2vr.b $vr1, $a2, 10
+; LA32-NEXT: ld.b $a2, $sp, 24
+; LA32-NEXT: vinsgr2vr.b $vr1, $a3, 11
+; LA32-NEXT: ld.b $a3, $sp, 32
+; LA32-NEXT: vinsgr2vr.b $vr1, $a4, 12
+; LA32-NEXT: vinsgr2vr.b $vr1, $a2, 13
+; LA32-NEXT: vinsgr2vr.b $vr1, $a1, 14
+; LA32-NEXT: vinsgr2vr.b $vr1, $a3, 15
+; LA32-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA32-NEXT: xvst $xr1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: buildvector_v32i8:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: addi.d $sp, $sp, -80
-; LA64-NEXT: fst.d $fs0, $sp, 72 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs1, $sp, 64 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs2, $sp, 56 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs3, $sp, 48 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs4, $sp, 40 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs5, $sp, 32 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs6, $sp, 24 # 8-byte Folded Spill
-; LA64-NEXT: fst.d $fs7, $sp, 16 # 8-byte Folded Spill
-; LA64-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a2
-; LA64-NEXT: xvreplgr2vr.b $xr2, $a3
-; LA64-NEXT: xvreplgr2vr.b $xr3, $a4
-; LA64-NEXT: ld.b $a1, $sp, 264
-; LA64-NEXT: xvreplgr2vr.b $xr4, $a5
-; LA64-NEXT: ld.b $a2, $sp, 80
-; LA64-NEXT: xvreplgr2vr.b $xr5, $a6
-; LA64-NEXT: ld.b $a3, $sp, 88
-; LA64-NEXT: xvreplgr2vr.b $xr6, $a7
-; LA64-NEXT: ld.b $a4, $sp, 96
-; LA64-NEXT: xvreplgr2vr.b $xr7, $a2
-; LA64-NEXT: ld.b $a2, $sp, 104
-; LA64-NEXT: xvreplgr2vr.b $xr8, $a3
-; LA64-NEXT: ld.b $a3, $sp, 112
-; LA64-NEXT: xvreplgr2vr.b $xr9, $a4
-; LA64-NEXT: ld.b $a4, $sp, 120
-; LA64-NEXT: xvreplgr2vr.b $xr10, $a2
-; LA64-NEXT: ld.b $a2, $sp, 128
-; LA64-NEXT: xvreplgr2vr.b $xr11, $a3
-; LA64-NEXT: ld.b $a3, $sp, 136
-; LA64-NEXT: xvreplgr2vr.b $xr12, $a4
-; LA64-NEXT: ld.b $a4, $sp, 144
-; LA64-NEXT: xvreplgr2vr.b $xr13, $a2
-; LA64-NEXT: ld.b $a2, $sp, 152
-; LA64-NEXT: xvreplgr2vr.b $xr14, $a3
-; LA64-NEXT: ld.b $a3, $sp, 160
-; LA64-NEXT: xvreplgr2vr.b $xr15, $a4
-; LA64-NEXT: ld.b $a4, $sp, 168
-; LA64-NEXT: xvreplgr2vr.b $xr16, $a2
-; LA64-NEXT: ld.b $a2, $sp, 176
-; LA64-NEXT: xvreplgr2vr.b $xr17, $a3
-; LA64-NEXT: ld.b $a3, $sp, 184
-; LA64-NEXT: xvreplgr2vr.b $xr18, $a4
-; LA64-NEXT: ld.b $a4, $sp, 192
-; LA64-NEXT: xvreplgr2vr.b $xr19, $a2
-; LA64-NEXT: ld.b $a2, $sp, 200
-; LA64-NEXT: xvreplgr2vr.b $xr20, $a3
-; LA64-NEXT: ld.b $a3, $sp, 208
-; LA64-NEXT: xvreplgr2vr.b $xr21, $a4
-; LA64-NEXT: ld.b $a4, $sp, 216
-; LA64-NEXT: xvreplgr2vr.b $xr22, $a2
-; LA64-NEXT: ld.b $a2, $sp, 224
-; LA64-NEXT: xvreplgr2vr.b $xr23, $a3
-; LA64-NEXT: ld.b $a3, $sp, 232
-; LA64-NEXT: xvreplgr2vr.b $xr24, $a4
-; LA64-NEXT: ld.b $a4, $sp, 240
-; LA64-NEXT: xvreplgr2vr.b $xr25, $a2
-; LA64-NEXT: ld.b $a2, $sp, 248
-; LA64-NEXT: xvreplgr2vr.b $xr26, $a3
-; LA64-NEXT: ld.b $a3, $sp, 256
-; LA64-NEXT: xvreplgr2vr.b $xr27, $a4
-; LA64-NEXT: ld.b $a4, $sp, 272
-; LA64-NEXT: xvreplgr2vr.b $xr28, $a2
-; LA64-NEXT: xvreplgr2vr.b $xr29, $a3
-; LA64-NEXT: xvreplgr2vr.b $xr30, $a1
-; LA64-NEXT: xvreplgr2vr.b $xr31, $a4
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA64-NEXT: xvpermi.q $xr2, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr2, 34
-; LA64-NEXT: xvpermi.q $xr3, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr3, 51
-; LA64-NEXT: xvpermi.q $xr4, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr4, 68
-; LA64-NEXT: xvpermi.q $xr5, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr5, 85
-; LA64-NEXT: xvpermi.q $xr6, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr6, 102
-; LA64-NEXT: xvpermi.q $xr7, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr7, 119
-; LA64-NEXT: xvpermi.q $xr8, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr8, 136
-; LA64-NEXT: xvpermi.q $xr9, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr9, 153
-; LA64-NEXT: xvpermi.q $xr10, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr10, 170
-; LA64-NEXT: xvpermi.q $xr11, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr11, 187
-; LA64-NEXT: xvpermi.q $xr12, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr12, 204
-; LA64-NEXT: xvpermi.q $xr13, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr13, 221
-; LA64-NEXT: xvpermi.q $xr14, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr14, 238
-; LA64-NEXT: xvpermi.q $xr15, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr15, 255
-; LA64-NEXT: xvpermi.q $xr16, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr16, 0
-; LA64-NEXT: xvpermi.q $xr17, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr17, 17
-; LA64-NEXT: xvpermi.q $xr18, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr18, 34
-; LA64-NEXT: xvpermi.q $xr19, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr19, 51
-; LA64-NEXT: xvpermi.q $xr20, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr20, 68
-; LA64-NEXT: xvpermi.q $xr21, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr21, 85
-; LA64-NEXT: xvpermi.q $xr22, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr22, 102
-; LA64-NEXT: xvpermi.q $xr23, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr23, 119
-; LA64-NEXT: xvpermi.q $xr24, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr24, 136
-; LA64-NEXT: xvpermi.q $xr25, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr25, 153
-; LA64-NEXT: xvpermi.q $xr26, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr26, 170
-; LA64-NEXT: xvpermi.q $xr27, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr27, 187
-; LA64-NEXT: xvpermi.q $xr28, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr28, 204
-; LA64-NEXT: xvpermi.q $xr29, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr29, 221
-; LA64-NEXT: xvpermi.q $xr30, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr30, 238
-; LA64-NEXT: xvpermi.q $xr31, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr31, 255
-; LA64-NEXT: xvst $xr0, $a0, 0
-; LA64-NEXT: fld.d $fs7, $sp, 16 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs6, $sp, 24 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs5, $sp, 32 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs4, $sp, 40 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs3, $sp, 48 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs2, $sp, 56 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs1, $sp, 64 # 8-byte Folded Reload
-; LA64-NEXT: fld.d $fs0, $sp, 72 # 8-byte Folded Reload
-; LA64-NEXT: addi.d $sp, $sp, 80
+; LA64-NEXT: ld.b $t0, $sp, 72
+; LA64-NEXT: ld.b $t1, $sp, 184
+; LA64-NEXT: ld.b $t2, $sp, 80
+; LA64-NEXT: ld.b $t3, $sp, 88
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 0
+; LA64-NEXT: ld.b $t0, $sp, 96
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 1
+; LA64-NEXT: ld.b $t2, $sp, 104
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 2
+; LA64-NEXT: ld.b $t3, $sp, 112
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 3
+; LA64-NEXT: ld.b $t0, $sp, 120
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 4
+; LA64-NEXT: ld.b $t2, $sp, 128
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 5
+; LA64-NEXT: ld.b $t3, $sp, 136
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 6
+; LA64-NEXT: ld.b $t0, $sp, 144
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 7
+; LA64-NEXT: ld.b $t2, $sp, 152
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 8
+; LA64-NEXT: ld.b $t3, $sp, 160
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 9
+; LA64-NEXT: ld.b $t0, $sp, 168
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 10
+; LA64-NEXT: ld.b $t2, $sp, 176
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 11
+; LA64-NEXT: ld.b $t3, $sp, 192
+; LA64-NEXT: vinsgr2vr.b $vr0, $t0, 12
+; LA64-NEXT: vinsgr2vr.b $vr0, $t2, 13
+; LA64-NEXT: vinsgr2vr.b $vr0, $t1, 14
+; LA64-NEXT: vinsgr2vr.b $vr0, $t3, 15
+; LA64-NEXT: vinsgr2vr.b $vr1, $a1, 0
+; LA64-NEXT: vinsgr2vr.b $vr1, $a2, 1
+; LA64-NEXT: vinsgr2vr.b $vr1, $a3, 2
+; LA64-NEXT: vinsgr2vr.b $vr1, $a4, 3
+; LA64-NEXT: ld.b $a1, $sp, 56
+; LA64-NEXT: vinsgr2vr.b $vr1, $a5, 4
+; LA64-NEXT: ld.b $a2, $sp, 0
+; LA64-NEXT: vinsgr2vr.b $vr1, $a6, 5
+; LA64-NEXT: ld.b $a3, $sp, 8
+; LA64-NEXT: vinsgr2vr.b $vr1, $a7, 6
+; LA64-NEXT: ld.b $a4, $sp, 16
+; LA64-NEXT: vinsgr2vr.b $vr1, $a2, 7
+; LA64-NEXT: ld.b $a2, $sp, 24
+; LA64-NEXT: vinsgr2vr.b $vr1, $a3, 8
+; LA64-NEXT: ld.b $a3, $sp, 32
+; LA64-NEXT: vinsgr2vr.b $vr1, $a4, 9
+; LA64-NEXT: ld.b $a4, $sp, 40
+; LA64-NEXT: vinsgr2vr.b $vr1, $a2, 10
+; LA64-NEXT: ld.b $a2, $sp, 48
+; LA64-NEXT: vinsgr2vr.b $vr1, $a3, 11
+; LA64-NEXT: ld.b $a3, $sp, 64
+; LA64-NEXT: vinsgr2vr.b $vr1, $a4, 12
+; LA64-NEXT: vinsgr2vr.b $vr1, $a2, 13
+; LA64-NEXT: vinsgr2vr.b $vr1, $a1, 14
+; LA64-NEXT: vinsgr2vr.b $vr1, $a3, 15
+; LA64-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA64-NEXT: xvst $xr1, $a0, 0
; LA64-NEXT: ret
entry:
%ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0
@@ -628,48 +470,21 @@ define void @buildvector_v32i8_partial(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a5,
; LA32-NEXT: ld.b $t6, $sp, 4
; LA32-NEXT: ld.b $t7, $sp, 0
; LA32-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a2
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a3
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 34
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a4
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 85
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a5
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 119
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a6
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 136
-; LA32-NEXT: xvreplgr2vr.b $xr1, $a7
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 255
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t7
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t6
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 34
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t5
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 68
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t4
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 102
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t3
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 119
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t2
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 187
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t1
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 204
-; LA32-NEXT: xvreplgr2vr.b $xr1, $t0
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.b $xr0, $xr1, 255
+; LA32-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; LA32-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; LA32-NEXT: vinsgr2vr.b $vr0, $a4, 5
+; LA32-NEXT: vinsgr2vr.b $vr0, $a5, 7
+; LA32-NEXT: vinsgr2vr.b $vr0, $a6, 8
+; LA32-NEXT: vinsgr2vr.b $vr0, $a7, 15
+; LA32-NEXT: vinsgr2vr.b $vr1, $t7, 1
+; LA32-NEXT: vinsgr2vr.b $vr1, $t6, 2
+; LA32-NEXT: vinsgr2vr.b $vr1, $t5, 4
+; LA32-NEXT: vinsgr2vr.b $vr1, $t4, 6
+; LA32-NEXT: vinsgr2vr.b $vr1, $t3, 7
+; LA32-NEXT: vinsgr2vr.b $vr1, $t2, 11
+; LA32-NEXT: vinsgr2vr.b $vr1, $t1, 12
+; LA32-NEXT: vinsgr2vr.b $vr1, $t0, 15
+; LA32-NEXT: xvpermi.q $xr0, $xr1, 2
; LA32-NEXT: xvst $xr0, $a0, 0
; LA32-NEXT: ret
;
@@ -684,48 +499,21 @@ define void @buildvector_v32i8_partial(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a5,
; LA64-NEXT: ld.b $t6, $sp, 8
; LA64-NEXT: ld.b $t7, $sp, 0
; LA64-NEXT: vinsgr2vr.b $vr0, $a1, 0
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a2
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a3
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 34
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a4
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 85
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a5
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 119
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a6
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 136
-; LA64-NEXT: xvreplgr2vr.b $xr1, $a7
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 255
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t7
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 17
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t6
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 34
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t5
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 68
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t4
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 102
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t3
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 119
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t2
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 187
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t1
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 204
-; LA64-NEXT: xvreplgr2vr.b $xr1, $t0
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.b $xr0, $xr1, 255
+; LA64-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; LA64-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; LA64-NEXT: vinsgr2vr.b $vr0, $a4, 5
+; LA64-NEXT: vinsgr2vr.b $vr0, $a5, 7
+; LA64-NEXT: vinsgr2vr.b $vr0, $a6, 8
+; LA64-NEXT: vinsgr2vr.b $vr0, $a7, 15
+; LA64-NEXT: vinsgr2vr.b $vr1, $t7, 1
+; LA64-NEXT: vinsgr2vr.b $vr1, $t6, 2
+; LA64-NEXT: vinsgr2vr.b $vr1, $t5, 4
+; LA64-NEXT: vinsgr2vr.b $vr1, $t4, 6
+; LA64-NEXT: vinsgr2vr.b $vr1, $t3, 7
+; LA64-NEXT: vinsgr2vr.b $vr1, $t2, 11
+; LA64-NEXT: vinsgr2vr.b $vr1, $t1, 12
+; LA64-NEXT: vinsgr2vr.b $vr1, $t0, 15
+; LA64-NEXT: xvpermi.q $xr0, $xr1, 2
; LA64-NEXT: xvst $xr0, $a0, 0
; LA64-NEXT: ret
entry:
@@ -1166,122 +954,64 @@ entry:
define void @buildvector_v16i16(ptr %dst, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) nounwind {
; LA32-LABEL: buildvector_v16i16:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: ld.h $t0, $sp, 32
-; LA32-NEXT: ld.h $t1, $sp, 28
-; LA32-NEXT: ld.h $t2, $sp, 24
-; LA32-NEXT: ld.h $t3, $sp, 20
-; LA32-NEXT: ld.h $t4, $sp, 16
-; LA32-NEXT: ld.h $t5, $sp, 12
+; LA32-NEXT: ld.h $t0, $sp, 0
+; LA32-NEXT: ld.h $t1, $sp, 32
+; LA32-NEXT: ld.h $t2, $sp, 28
+; LA32-NEXT: ld.h $t3, $sp, 24
+; LA32-NEXT: ld.h $t4, $sp, 20
+; LA32-NEXT: ld.h $t5, $sp, 4
; LA32-NEXT: ld.h $t6, $sp, 8
-; LA32-NEXT: ld.h $t7, $sp, 4
-; LA32-NEXT: ld.h $t8, $sp, 0
-; LA32-NEXT: vinsgr2vr.h $vr0, $a1, 0
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a2
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 17
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a3
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 34
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a4
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 51
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a5
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 68
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a6
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 85
-; LA32-NEXT: xvreplgr2vr.h $xr1, $a7
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 102
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t8
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 119
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t7
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 0
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t6
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 17
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t5
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 34
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t4
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 51
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t3
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 68
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t2
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 85
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t1
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 102
-; LA32-NEXT: xvreplgr2vr.h $xr1, $t0
-; LA32-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA32-NEXT: xvextrins.h $xr0, $xr1, 119
-; LA32-NEXT: xvst $xr0, $a0, 0
+; LA32-NEXT: ld.h $t7, $sp, 12
+; LA32-NEXT: ld.h $t8, $sp, 16
+; LA32-NEXT: vinsgr2vr.h $vr0, $t5, 0
+; LA32-NEXT: vinsgr2vr.h $vr0, $t6, 1
+; LA32-NEXT: vinsgr2vr.h $vr0, $t7, 2
+; LA32-NEXT: vinsgr2vr.h $vr0, $t8, 3
+; LA32-NEXT: vinsgr2vr.h $vr0, $t4, 4
+; LA32-NEXT: vinsgr2vr.h $vr0, $t3, 5
+; LA32-NEXT: vinsgr2vr.h $vr0, $t2, 6
+; LA32-NEXT: vinsgr2vr.h $vr0, $t1, 7
+; LA32-NEXT: vinsgr2vr.h $vr1, $a1, 0
+; LA32-NEXT: vinsgr2vr.h $vr1, $a2, 1
+; LA32-NEXT: vinsgr2vr.h $vr1, $a3, 2
+; LA32-NEXT: vinsgr2vr.h $vr1, $a4, 3
+; LA32-NEXT: vinsgr2vr.h $vr1, $a5, 4
+; LA32-NEXT: vinsgr2vr.h $vr1, $a6, 5
+; LA32-NEXT: vinsgr2vr.h $vr1, $a7, 6
+; LA32-NEXT: vinsgr2vr.h $vr1, $t0, 7
+; LA32-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA32-NEXT: xvst $xr1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: buildvector_v16i16:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: ld.h $t0, $sp, 64
-; LA64-NEXT: ld.h $t1, $sp, 56
-; LA64-NEXT: ld.h $t2, $sp, 48
-; LA64-NEXT: ld.h $t3, $sp, 40
-; LA64-NEXT: ld.h $t4, $sp, 32
-; LA64-NEXT: ld.h $t5, $sp, 24
+; LA64-NEXT: ld.h $t0, $sp, 0
+; LA64-NEXT: ld.h $t1, $sp, 64
+; LA64-NEXT: ld.h $t2, $sp, 56
+; LA64-NEXT: ld.h $t3, $sp, 48
+; LA64-NEXT: ld.h $t4, $sp, 40
+; LA64-NEXT: ld.h $t5, $sp, 8
; LA64-NEXT: ld.h $t6, $sp, 16
-; LA64-NEXT: ld.h $t7, $sp, 8
-; LA64-NEXT: ld.h $t8, $sp, 0
-; LA64-NEXT: vinsgr2vr.h $vr0, $a1, 0
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a2
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 17
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a3
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 34
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a4
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 51
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a5
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 68
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a6
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 85
-; LA64-NEXT: xvreplgr2vr.h $xr1, $a7
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 102
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t8
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 18
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 119
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t7
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 0
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t6
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 17
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t5
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 34
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t4
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 51
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t3
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 68
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t2
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 85
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t1
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 102
-; LA64-NEXT: xvreplgr2vr.h $xr1, $t0
-; LA64-NEXT: xvpermi.q $xr1, $xr0, 48
-; LA64-NEXT: xvextrins.h $xr0, $xr1, 119
-; LA64-NEXT: xvst $xr0, $a0, 0
+; LA64-NEXT: ld.h $t7, $sp, 24
+; LA64-NEXT: ld.h $t8, $sp, 32
+; LA64-NEXT: vinsgr2vr.h $vr0, $t5, 0
+; LA64-NEXT: vinsgr2vr.h $vr0, $t6, 1
+; LA64-NEXT: vinsgr2vr.h $vr0, $t7, 2
+; LA64-NEXT: vinsgr2vr.h $vr0, $t8, 3
+; LA64-NEXT: vinsgr2vr.h $vr0, $t4, 4
+; LA64-NEXT: vinsgr2vr.h $vr0, $t3, 5
+; LA64-NEXT: vinsgr2vr.h $vr0, $t2, 6
+; LA64-NEXT: vinsgr2vr.h $vr0, $t1, 7
+; LA64-NEXT: vinsgr2vr.h $vr1, $a1, 0
+; LA64-NEXT: vinsgr2vr.h $vr1, $a2, 1
+; LA64-NEXT: vinsgr2vr.h $vr1, $a3, 2
+; LA64-NEXT: vinsgr2vr.h $vr1, $a4, 3
+; LA64-NEXT: vinsgr2vr.h $vr1, $a5, 4
+; LA64-NEXT: vinsgr2vr.h $vr1, $a6, 5
+; LA64-NEXT: vinsgr2vr.h $vr1, $a7, 6
+; LA64-NEXT: vinsgr2vr.h $vr1, $t0, 7
+; LA64-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA64-NEXT: xvst $xr1, $a0, 0
; LA64-NEXT: ret
entry:
%ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
@@ -1307,26 +1037,15 @@ entry:
define void @buildvector_v16i16_partial(ptr %dst, i16 %a0, i16 %a2, i16 %a5, i16 %a6, i16 %a7, i16 %a12, i16 %a13) nounwind {
; CHECK-LABEL: buildvector_v16i16_partial:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a2
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 34
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a3
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 85
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a4
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 102
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a5
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 18
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 119
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a6
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 68
-; CHECK-NEXT: xvreplgr2vr.h $xr1, $a7
-; CHECK-NEXT: xvpermi.q $xr1, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr1, 85
-; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a6, 4
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a7, 5
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a1, 0
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a2, 2
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a3, 5
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a4, 6
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a5, 7
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 2
+; CHECK-NEXT: xvst $xr1, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
@@ -1497,15 +1216,16 @@ define void @buildvector_v8i32(ptr %dst, i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32
; CHECK-LABEL: buildvector_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ld.w $t0, $sp, 0
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a3, 2
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a4, 3
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a5, 4
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a6, 5
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a7, 6
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $t0, 7
-; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a5, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a6, 1
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a7, 2
+; CHECK-NEXT: vinsgr2vr.w $vr0, $t0, 3
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a1, 0
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a2, 1
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a3, 2
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a4, 3
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 2
+; CHECK-NEXT: xvst $xr1, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x i32> undef, i32 %a0, i32 0
@@ -1523,10 +1243,11 @@ entry:
define void @buildvector_v8i32_partial(ptr %dst, i32 %a2, i32 %a4, i32 %a5, i32 %a6) nounwind {
; CHECK-LABEL: buildvector_v8i32_partial:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 4
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a3, 5
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a4, 6
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 2
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a2, 0
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a3, 1
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a4, 2
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1613,24 +1334,26 @@ define void @buildvector_v4i64(ptr %dst, i64 %a0, i64 %a1, i64 %a2, i64 %a3) nou
; LA32-LABEL: buildvector_v4i64:
; LA32: # %bb.0: # %entry
; LA32-NEXT: ld.w $t0, $sp, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 1
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a3, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a4, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a5, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a6, 5
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a7, 6
-; LA32-NEXT: xvinsgr2vr.w $xr0, $t0, 7
-; LA32-NEXT: xvst $xr0, $a0, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a5, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a6, 1
+; LA32-NEXT: vinsgr2vr.w $vr0, $a7, 2
+; LA32-NEXT: vinsgr2vr.w $vr0, $t0, 3
+; LA32-NEXT: vinsgr2vr.w $vr1, $a1, 0
+; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 1
+; LA32-NEXT: vinsgr2vr.w $vr1, $a3, 2
+; LA32-NEXT: vinsgr2vr.w $vr1, $a4, 3
+; LA32-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA32-NEXT: xvst $xr1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: buildvector_v4i64:
; LA64: # %bb.0: # %entry
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a1, 0
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a2, 1
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a3, 2
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a4, 3
-; LA64-NEXT: xvst $xr0, $a0, 0
+; LA64-NEXT: vinsgr2vr.d $vr0, $a3, 0
+; LA64-NEXT: vinsgr2vr.d $vr0, $a4, 1
+; LA64-NEXT: vinsgr2vr.d $vr1, $a1, 0
+; LA64-NEXT: vinsgr2vr.d $vr1, $a2, 1
+; LA64-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA64-NEXT: xvst $xr1, $a0, 0
; LA64-NEXT: ret
entry:
%ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0
@@ -1644,11 +1367,12 @@ entry:
define void @buildvector_v4i64_partial(ptr %dst, i64 %a1, i64 %a2) nounwind {
; LA32-LABEL: buildvector_v4i64_partial:
; LA32: # %bb.0: # %entry
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 2
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 3
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a3, 4
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a4, 5
-; LA32-NEXT: xvst $xr0, $a0, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a3, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a4, 1
+; LA32-NEXT: vinsgr2vr.w $vr1, $a1, 2
+; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 3
+; LA32-NEXT: xvpermi.q $xr1, $xr0, 2
+; LA32-NEXT: xvst $xr1, $a0, 0
; LA32-NEXT: ret
;
; LA64-LABEL: buildvector_v4i64_partial:
@@ -1723,21 +1447,21 @@ entry:
define void @buildvector_v8f32(ptr %dst, float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) nounwind {
; CHECK-LABEL: buildvector_v8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f7 killed $f7 def $xr7
-; CHECK-NEXT: # kill: def $f6 killed $f6 def $xr6
-; CHECK-NEXT: # kill: def $f5 killed $f5 def $xr5
+; CHECK-NEXT: # kill: def $f7 killed $f7 def $vr7
+; CHECK-NEXT: # kill: def $f6 killed $f6 def $vr6
+; CHECK-NEXT: # kill: def $f5 killed $f5 def $vr5
; CHECK-NEXT: # kill: def $f4 killed $f4 def $xr4
-; CHECK-NEXT: # kill: def $f3 killed $f3 def $xr3
-; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2
-; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT: # kill: def $f3 killed $f3 def $vr3
+; CHECK-NEXT: # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1
-; CHECK-NEXT: xvinsve0.w $xr0, $xr2, 2
-; CHECK-NEXT: xvinsve0.w $xr0, $xr3, 3
-; CHECK-NEXT: xvinsve0.w $xr0, $xr4, 4
-; CHECK-NEXT: xvinsve0.w $xr0, $xr5, 5
-; CHECK-NEXT: xvinsve0.w $xr0, $xr6, 6
-; CHECK-NEXT: xvinsve0.w $xr0, $xr7, 7
+; CHECK-NEXT: vextrins.w $vr4, $vr5, 16
+; CHECK-NEXT: vextrins.w $vr4, $vr6, 32
+; CHECK-NEXT: vextrins.w $vr4, $vr7, 48
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT: vextrins.w $vr0, $vr2, 32
+; CHECK-NEXT: vextrins.w $vr0, $vr3, 48
+; CHECK-NEXT: xvpermi.q $xr0, $xr4, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1756,14 +1480,15 @@ entry:
define void @buildvector_v8f32_partial(ptr %dst, float %a1, float %a2, float %a5, float %a7) nounwind {
; CHECK-LABEL: buildvector_v8f32_partial:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f3 killed $f3 def $xr3
-; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2
-; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvinsve0.w $xr0, $xr0, 1
-; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 2
-; CHECK-NEXT: xvinsve0.w $xr0, $xr2, 5
-; CHECK-NEXT: xvinsve0.w $xr0, $xr3, 7
+; CHECK-NEXT: # kill: def $f3 killed $f3 def $vr3
+; CHECK-NEXT: # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vextrins.w $vr2, $vr2, 16
+; CHECK-NEXT: vextrins.w $vr2, $vr3, 48
+; CHECK-NEXT: vextrins.w $vr0, $vr0, 16
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 32
+; CHECK-NEXT: xvpermi.q $xr0, $xr2, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -1858,13 +1583,13 @@ entry:
define void @buildvector_v4f64(ptr %dst, double %a0, double %a1, double %a2, double %a3) nounwind {
; CHECK-LABEL: buildvector_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f3_64 killed $f3_64 def $xr3
+; CHECK-NEXT: # kill: def $f3_64 killed $f3_64 def $vr3
; CHECK-NEXT: # kill: def $f2_64 killed $f2_64 def $xr2
-; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $xr1
+; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $vr1
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
-; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 2
-; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 3
+; CHECK-NEXT: vextrins.d $vr2, $vr3, 16
+; CHECK-NEXT: vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr2, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
index f0277a78fa452..156c829c2dfb6 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
@@ -6,91 +6,91 @@ declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32)
define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-LABEL: powi_v8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
+; CHECK-NEXT: addi.d $sp, $sp, -128
+; CHECK-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill
+; CHECK-NEXT: xvst $xr0, $sp, 80 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1
+; CHECK-NEXT: vld $vr1, $sp, 48 # 16-byte Folded Reload
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 2
+; CHECK-NEXT: vextrins.w $vr1, $vr0, 32
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 3
+; CHECK-NEXT: vextrins.w $vr1, $vr0, 48
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 4
-; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 5
-; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6
+; CHECK-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vextrins.w $vr0, $vr1, 16
+; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6
-; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload
+; CHECK-NEXT: vextrins.w $vr1, $vr0, 32
+; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 7
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload
+; CHECK-NEXT: vextrins.w $vr1, $vr0, 48
+; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
+; CHECK-NEXT: xvpermi.q $xr1, $xr0, 2
; CHECK-NEXT: xvori.b $xr0, $xr1, 0
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 128
; CHECK-NEXT: ret
entry:
%res = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %va, i32 %b)
@@ -102,51 +102,50 @@ declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32)
define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-LABEL: powi_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
+; CHECK-NEXT: addi.d $sp, $sp, -112
+; CHECK-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 96 # 8-byte Folded Spill
+; CHECK-NEXT: xvst $xr0, $sp, 64 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
-; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 2
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
-; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.d $xr0, $xr0, 2
+; CHECK-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
+; CHECK-NEXT: vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2
-; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill
-; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
-; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 3
-; CHECK-NEXT: xvori.b $xr0, $xr1, 0
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT: xvld $xr1, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: ld.d $fp, $sp, 96 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 112
; CHECK-NEXT: ret
entry:
%res = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %va, i32 %b)
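A note on the two powi diffs above: the scalarized libcall results are now accumulated into two LSX registers, upper half first, and the halves are only joined with xvpermi.q at the end. Single-element intermediates consequently spill as 16-byte vst/vld pairs instead of 32-byte xvst/xvld, at the cost of a larger frame (96 -> 128 bytes for v8f32, 96 -> 112 for v4f64) to keep both partial halves live across the calls.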
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
index ffb793dd13016..765473ce166df 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
@@ -7,12 +7,13 @@
define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: shufflevector_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpickve.d $xr2, $xr1, 2
-; CHECK-NEXT: xvpickve.d $xr3, $xr0, 3
-; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 1
-; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 2
-; CHECK-NEXT: xvpickve.d $xr1, $xr1, 3
-; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 3
+; CHECK-NEXT: xvpickve.d $xr2, $xr1, 3
+; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78
+; CHECK-NEXT: xvrepl128vei.d $xr3, $xr3, 1
+; CHECK-NEXT: vextrins.d $vr3, $vr2, 16
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 2
+; CHECK-NEXT: vextrins.d $vr0, $vr1, 16
+; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2
; CHECK-NEXT: ret
entry:
%c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 6, i32 3, i32 7>
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index 72f25095d3837..aa29264924df9 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -45,9 +45,8 @@ define void @insert_32xi8_undef(ptr %dst, i8 %in) nounwind {
define void @insert_32xi8_undef_upper(ptr %dst, i8 %in) nounwind {
; CHECK-LABEL: insert_32xi8_undef_upper:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvreplgr2vr.b $xr0, $a1
-; CHECK-NEXT: xvpermi.q $xr0, $xr0, 48
-; CHECK-NEXT: xvextrins.b $xr0, $xr0, 102
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 6
+; CHECK-NEXT: xvpermi.q $xr0, $xr0, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
%v = insertelement <32 x i8> poison, i8 %in, i32 22
@@ -99,9 +98,8 @@ define void @insert_16xi16_undef(ptr %dst, i16 %in) nounwind {
define void @insert_16xi16_undef_upper(ptr %dst, i16 %in) nounwind {
; CHECK-LABEL: insert_16xi16_undef_upper:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvreplgr2vr.h $xr0, $a1
-; CHECK-NEXT: xvpermi.q $xr0, $xr0, 48
-; CHECK-NEXT: xvextrins.h $xr0, $xr0, 34
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 2
+; CHECK-NEXT: xvpermi.q $xr0, $xr0, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
%v = insertelement <16 x i16> poison, i16 %in, i32 10
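A note on the two upper-half cases above: an insert that defines only a lane in the upper 128 bits of an otherwise-poison vector now goes through a single LSX vinsgr2vr into the matching lane of the low half, after which xvpermi.q with immediate 2 copies that freshly built low half into the upper lane, leaving the rest undef. The lane arithmetic checks out: lane 22 of <32 x i8> is 22 - 16 = 6 within the upper half, and lane 10 of <16 x i16> is 10 - 8 = 2, matching the vinsgr2vr immediates.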
diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalar-to-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/scalar-to-vector.ll
index 5593890bb7684..bba269279937a 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/scalar-to-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/scalar-to-vector.ll
@@ -25,7 +25,7 @@ define <16 x i16> @scalar_to_16xi16(i16 %val) {
define <8 x i32> @scalar_to_8xi32(i32 %val) {
; CHECK-LABEL: scalar_to_8xi32:
; CHECK: # %bb.0:
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
; CHECK-NEXT: ret
%ret = insertelement <8 x i32> poison, i32 %val, i32 0
ret <8 x i32> %ret
@@ -34,13 +34,13 @@ define <8 x i32> @scalar_to_8xi32(i32 %val) {
define <4 x i64> @scalar_to_4xi64(i64 %val) {
; LA32-LABEL: scalar_to_4xi64:
; LA32: # %bb.0:
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a0, 0
-; LA32-NEXT: xvinsgr2vr.w $xr0, $a1, 1
+; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 0
+; LA32-NEXT: vinsgr2vr.w $vr0, $a1, 1
; LA32-NEXT: ret
;
; LA64-LABEL: scalar_to_4xi64:
; LA64: # %bb.0:
-; LA64-NEXT: xvinsgr2vr.d $xr0, $a0, 0
+; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT: ret
%ret = insertelement <4 x i64> poison, i64 %val, i32 0
ret <4 x i64> %ret
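A note on the scalar_to_vector cases: when only element 0 of a 256-bit vector is defined, the build collapses to a bare LSX insert, since $vrN aliases the low half of $xrN and every other lane is poison. On LA32 there is no 64-bit GPR, so the i64 element arrives split across $a0/$a1 and is assembled with two 32-bit inserts into w-lanes 0 and 1.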