[llvm] [LoongArch] Custom lower vecreduce. (PR #155196)

via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 24 19:31:10 PDT 2025


https://github.com/tangaac created https://github.com/llvm/llvm-project/pull/155196

This patch custom-lowers the remaining integer vector reductions on LoongArch (and, or, xor, smax, smin, umax and umin), extending the approach already used for vecreduce.add: rather than letting the generic shuffle expansion run, the operand is widened to a legal vector type if needed, then reduced in-register by repeatedly shifting the live data right by half its width (vbsrl.v) and combining with the base operation, with the scalar result taken from element 0.
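
A minimal scalar model of the resulting shift sequence (illustrative only; the real code in lowerVECREDUCE builds SelectionDAG nodes, and the 256-bit LASX cases first fold the high 128-bit half into the low half with xvpermi.q before this ladder starts):

#include <cstdio>

int main() {
  // Example: reducing <16 x i8>, so NumEles = 16 and EleBits = 8.
  unsigned NumEles = 16, EleBits = 8;
  for (unsigned i = NumEles; i > 1; i /= 2)
    std::printf("vbsrl.v by %u bytes, then combine\n", i * EleBits / 16);
  // Prints 8, 4, 2, 1: the vbsrl.v immediates seen in the updated tests.
  // The scalar result is then read from element 0 (vstelm in the tests).
  return 0;
}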

From 5a009440c3c379d5de613f9de1aba8800b739a08 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Fri, 22 Aug 2025 16:04:58 +0800
Subject: [PATCH] Custom lower vecreduce

---
 .../LoongArch/LoongArchISelLowering.cpp       | 43 ++++++++++
 .../Target/LoongArch/LoongArchISelLowering.h  |  1 +
 .../LoongArchTargetTransformInfo.cpp          |  7 ++
 .../CodeGen/LoongArch/lasx/vec-reduce-and.ll  | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-or.ll   | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-smax.ll | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-smin.ll | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-umax.ll | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-umin.ll | 80 ++++++++-----------
 .../CodeGen/LoongArch/lasx/vec-reduce-xor.ll  | 80 ++++++++-----------
 .../CodeGen/LoongArch/lsx/vec-reduce-and.ll   | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-or.ll    | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-smax.ll  | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-smin.ll  | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-umax.ll  | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-umin.ll  | 76 +++++++++---------
 .../CodeGen/LoongArch/lsx/vec-reduce-xor.ll   | 76 +++++++++---------
 17 files changed, 541 insertions(+), 602 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 18d3e66bc0763..a2b925e3fee6d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -341,6 +341,13 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
           MVT::v2i16, MVT::v4i32, MVT::v2i32, MVT::v2i64}) {
       setOperationAction(ISD::TRUNCATE, VT, Custom);
       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
     }
   }
 
@@ -526,6 +533,14 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerBF16_TO_FP(Op, DAG);
   case ISD::VECREDUCE_ADD:
     return lowerVECREDUCE_ADD(Op, DAG);
+  case ISD::VECREDUCE_AND:
+  case ISD::VECREDUCE_OR:
+  case ISD::VECREDUCE_XOR:
+  case ISD::VECREDUCE_SMAX:
+  case ISD::VECREDUCE_SMIN:
+  case ISD::VECREDUCE_UMAX:
+  case ISD::VECREDUCE_UMIN:
+    return lowerVECREDUCE(Op, DAG);
   }
   return SDValue();
 }
@@ -580,6 +595,34 @@ SDValue LoongArchTargetLowering::lowerVECREDUCE_ADD(SDValue Op,
                      DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
 }
 
+SDValue LoongArchTargetLowering::lowerVECREDUCE(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+
+  MVT OpVT = Op.getSimpleValueType();
+  SDValue Val = Op.getOperand(0);
+
+  unsigned NumEles = Val.getSimpleValueType().getVectorNumElements();
+  unsigned EleBits = Val.getSimpleValueType().getScalarSizeInBits();
+
+  // Widen the operand vector until its type is legal.
+  while (!isTypeLegal(Val.getSimpleValueType())) {
+    Val = DAG.WidenVector(Val, DL);
+  }
+
+  unsigned Opcode = ISD::getVecReduceBaseOpcode(Op.getOpcode());
+  MVT VecTy = Val.getSimpleValueType();
+
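+  // Each iteration folds the upper half of the remaining data into the
+  // lower half: shift right by i * EleBits / 16 bytes (half the live width)
+  // and combine with the reduction's base operation.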
+  for (int i = NumEles; i > 1; i /= 2) {
+    SDValue ShiftAmt = DAG.getConstant(i * EleBits / 16, DL, MVT::i64);
+    SDValue Tmp = DAG.getNode(LoongArchISD::VBSRL, DL, VecTy, Val, ShiftAmt);
+    Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val);
+  }
+
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT, Val,
+                     DAG.getConstant(0, DL, Subtarget.getGRLenVT()));
+}
+
 SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op,
                                                SelectionDAG &DAG) const {
   unsigned IsData = Op.getConstantOperandVal(4);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 40e237b1c69e4..ff22afa86ea9c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -390,6 +390,7 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBF16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVECREDUCE_ADD(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
index efe898c33072e..f548a8dd0532b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
@@ -100,6 +100,13 @@ bool LoongArchTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
   default:
     return true;
   case Intrinsic::vector_reduce_add:
+  case Intrinsic::vector_reduce_and:
+  case Intrinsic::vector_reduce_or:
+  case Intrinsic::vector_reduce_smax:
+  case Intrinsic::vector_reduce_smin:
+  case Intrinsic::vector_reduce_umax:
+  case Intrinsic::vector_reduce_umin:
+  case Intrinsic::vector_reduce_xor:
     return false;
   }
 }
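
With shouldExpandReduction now returning false for these intrinsics, the ExpandReductions pass no longer turns them into shuffle sequences in IR; SelectionDAG sees the VECREDUCE_* nodes directly and applies the custom lowering above. The test updates below show the effect: each LASX reduction collapses to a single xvpermi.q (folding the high 128-bit half into the low half) followed by a short vbsrl.v/combine ladder on 128-bit registers, and the v4i64 cases drop their constant-pool shuffle masks entirely (the pcalau12i/.LCPI3_0 loads disappear).
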
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
index a3160f10c8ca8..fd64beab57bf0 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_and_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_and_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_and_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_and_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
index bc910c23e4b17..cdb08d9de3821 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_or_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_or_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_or_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
index 378088c9f8280..1d182731c93be 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_smax_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_smax_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_smax_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_smax_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
index 1c7f2054cd4e1..369afdd1fc7bc 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_smin_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_smin_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_smin_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_smin_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvmin.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.d $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
index 152f093cbd025..5256a72ad7d97 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_umax_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_umax_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_umax_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.du $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
index 64ed377535abf..a82c886d8eed1 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_umin_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_umin_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_umin_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_umin_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.du $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
index 5dbf37e732637..429fadcdd156e 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
@@ -5,22 +5,17 @@ define void @vec_reduce_xor_v32i8(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %src
   %res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %v)
@@ -32,19 +27,15 @@ define void @vec_reduce_xor_v16i16(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %src
   %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v)
@@ -56,16 +47,13 @@ define void @vec_reduce_xor_v8i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %src
   %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v)
@@ -77,15 +65,11 @@ define void @vec_reduce_xor_v4i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
-; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
-; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
-; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
-; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    xvpermi.q $xr1, $xr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %src
   %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
index c16de10239642..cca4ce30758f1 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_and_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_and_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_and_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_and_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_and_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_and_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_and_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_and_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_and_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_and_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_and_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
index 52f18cce611de..ce431f0cf6a74 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_or_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_or_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_or_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_or_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_or_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_or_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_or_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_or_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_or_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_or_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_or_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
index 5d8c3e36549d6..bdf153ad7794f 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_smax_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_smax_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_smax_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_smax_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_smax_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_smax_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_smax_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_smax_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.w $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_smax_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.w $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_smax_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smax_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.d $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
index 2d53095db89db..e3b3c5e6f2410 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_smin_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_smin_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_smin_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_smin_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_smin_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_smin_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_smin_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.h $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_smin_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.w $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_smin_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.w $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_smin_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_smin_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.d $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
index abe9ba7dfb246..fff2304befd68 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_umax_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_umax_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_umax_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_umax_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_umax_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_umax_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmax.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.wu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmax.wu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_umax_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umax_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.du $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
index 3d396f3692e7d..e14a294cbcfb6 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_umin_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_umin_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_umin_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_umin_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_umin_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_umin_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vmin.hu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.wu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vmin.wu $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_umin_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_umin_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.du $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
index 1894532d6121d..ae2bb8f91de05 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
@@ -6,13 +6,13 @@ define void @vec_reduce_xor_v16i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <16 x i8>, ptr %src
@@ -26,12 +26,12 @@ define void @vec_reduce_xor_v8i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i8>, ptr %src
@@ -45,10 +45,10 @@ define void @vec_reduce_xor_v4i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i8>, ptr %src
@@ -62,8 +62,8 @@ define void @vec_reduce_xor_v2i8(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i8>, ptr %src
@@ -77,11 +77,11 @@ define void @vec_reduce_xor_v8i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
 ; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <8 x i16>, ptr %src
@@ -95,10 +95,10 @@ define void @vec_reduce_xor_v4i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, ptr %src
@@ -112,8 +112,8 @@ define void @vec_reduce_xor_v2i16(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 2
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i16>, ptr %src
@@ -126,10 +126,10 @@ define void @vec_reduce_xor_v4i32(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <4 x i32>, ptr %src
@@ -143,8 +143,8 @@ define void @vec_reduce_xor_v2i32(ptr %src, ptr %dst) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 4
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i32>, ptr %src
@@ -157,8 +157,8 @@ define void @vec_reduce_xor_v2i64(ptr %src, ptr %dst) nounwind {
 ; CHECK-LABEL: vec_reduce_xor_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
-; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr1, $vr0
 ; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
 ; CHECK-NEXT:    ret
   %v = load <2 x i64>, ptr %src
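
For reference, the pattern these updated CHECK lines verify is a uniform halving reduction: each vbsrl.v step shifts the whole vector right by half of the live byte width (8, 4, 2, 1 bytes for v16i8), and the lane-wise op (vmax/vmin/vxor/...) folds the shifted copy back into the original, so lane 0 holds the result after log2(N) steps. Below is a minimal scalar model of that idea in C; the helper name reduce_smax_v16i8 is hypothetical and the code is only an illustration of the shift-and-fold shape, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar model of the vbsrl.v + vmax.b ladder checked in
 * vec_reduce_smax_v16i8: shift right by 8/4/2/1 bytes, then fold with a
 * lane-wise signed max; after four steps lane 0 holds the reduction. */
static int8_t reduce_smax_v16i8(const int8_t src[16]) {
  int8_t v[16];
  for (int i = 0; i < 16; i++)
    v[i] = src[i];
  for (int shift = 8; shift >= 1; shift >>= 1) {
    for (int i = 0; i < 16; i++) {
      /* vbsrl.v shifts zeros into the top lanes; those lanes never feed
       * lane 0 on later steps, so the zeros cannot affect the result. */
      int8_t hi = (i + shift < 16) ? v[i + shift] : 0;
      if (hi > v[i]) /* vmax.b */
        v[i] = hi;
    }
  }
  return v[0]; /* vstelm.b $vr0, $a1, 0, 0 stores lane 0 */
}

int main(void) {
  const int8_t in[16] = {3, -7, 12, 5, 9, -1, 0, 8,
                         4, 6, -3, 11, 2, 7, 1, 10};
  printf("%d\n", reduce_smax_v16i8(in)); /* prints 12 */
  return 0;
}

The sub-128-bit cases follow the same shape: the value is loaded into the low lanes with vinsgr2vr and the sequence simply starts at the smaller width (e.g. 4/2/1 bytes for v8i8), which is why every variant above collapses to the same vbsrl.v/op ladder in place of the earlier mixed vsrli.d/vshuf4i/vreplvei shuffles.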


