[llvm] 8439777 - [LoongArch] Pre-commit tests for vecreduce_and/or/... (#154879)

via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 22 02:52:47 PDT 2025


Author: tangaac
Date: 2025-08-22T17:52:43+08:00
New Revision: 843977713129614db1b07ce877f59de133c8288e

URL: https://github.com/llvm/llvm-project/commit/843977713129614db1b07ce877f59de133c8288e
DIFF: https://github.com/llvm/llvm-project/commit/843977713129614db1b07ce877f59de133c8288e.diff

LOG: [LoongArch] Pre-commit tests for vecreduce_and/or/... (#154879)

Added: 
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
    llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
    llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
new file mode 100644
index 0000000000000..a3160f10c8ca8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_and_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
new file mode 100644
index 0000000000000..bc910c23e4b17
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_or_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
new file mode 100644
index 0000000000000..378088c9f8280
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_smax_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
new file mode 100644
index 0000000000000..1c7f2054cd4e1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_smin_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvmin.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
new file mode 100644
index 0000000000000..152f093cbd025
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_umax_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
new file mode 100644
index 0000000000000..64ed377535abf
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_umin_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
new file mode 100644
index 0000000000000..5dbf37e732637
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s
+
+define void @vec_reduce_xor_v32i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_xor_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <32 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_xor_v16i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_xor_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_xor_v8i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_xor_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_xor_v4i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_xor_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xvld $xr0, $a0, 0
+; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
+; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
+; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}
+

diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
new file mode 100644
index 0000000000000..c16de10239642
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_and_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_and_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_and_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
new file mode 100644
index 0000000000000..52f18cce611de
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_or_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_or_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_or_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
new file mode 100644
index 0000000000000..5d8c3e36549d6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_smax_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smax_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smax_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
new file mode 100644
index 0000000000000..2d53095db89db
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_smin_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_smin_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_smin_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
new file mode 100644
index 0000000000000..abe9ba7dfb246
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umax_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umax_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
new file mode 100644
index 0000000000000..3d396f3692e7d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v8i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v4i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v2i8(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src
+  %res = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %v)
+  store i8 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v8i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v4i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v2i16(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src
+  %res = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %v)
+  store i16 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src
+  %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v)
+  store i32 %res, ptr %dst
+  ret void
+}
+
+define void @vec_reduce_umin_v2i64(ptr %src, ptr %dst) nounwind {
+; CHECK-LABEL: vec_reduce_umin_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src
+  %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v)
+  store i64 %res, ptr %dst
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
new file mode 100644
index 0000000000000..1894532d6121d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s
+
+define void @vec_reduce_xor_v16i8(ptr %src, ptr %dst) nounwind { ; xor reduction of a full 128-bit LSX vector; log2(16) = 4 halving steps
+; CHECK-LABEL: vec_reduce_xor_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <16 x i8>, ptr %src ; load the vector operand from memory
+  %res = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %v) ; xor all 16 lanes down to one scalar byte
+  store i8 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v8i8(ptr %src, ptr %dst) nounwind { ; xor reduction of a 64-bit <8 x i8> vector
+; CHECK-LABEL: vec_reduce_xor_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i8>, ptr %src ; 64-bit load; only the low half of the LSX register is meaningful
+  %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v) ; xor all 8 lanes down to one scalar byte
+  store i8 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v4i8(ptr %src, ptr %dst) nounwind { ; xor reduction of a 32-bit <4 x i8> vector
+; CHECK-LABEL: vec_reduce_xor_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i8>, ptr %src ; 32-bit load into lane 0 of an LSX register
+  %res = call i8 @llvm.vector.reduce.xor.v4i8(<4 x i8> %v) ; xor all 4 lanes down to one scalar byte
+  store i8 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v2i8(ptr %src, ptr %dst) nounwind { ; xor reduction of the minimal two-lane i8 case
+; CHECK-LABEL: vec_reduce_xor_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.h $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i8>, ptr %src ; 16-bit load into lane 0 of an LSX register
+  %res = call i8 @llvm.vector.reduce.xor.v2i8(<2 x i8> %v) ; xor the two lanes to a scalar byte
+  store i8 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v8i16(ptr %src, ptr %dst) nounwind { ; xor reduction of a full 128-bit <8 x i16> vector; 3 halving steps
+; CHECK-LABEL: vec_reduce_xor_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <8 x i16>, ptr %src ; load the vector operand from memory
+  %res = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %v) ; xor all 8 lanes down to one scalar i16
+  store i16 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v4i16(ptr %src, ptr %dst) nounwind { ; xor reduction of a 64-bit <4 x i16> vector
+; CHECK-LABEL: vec_reduce_xor_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i16>, ptr %src ; 64-bit load; only the low half of the LSX register is meaningful
+  %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v) ; xor all 4 lanes down to one scalar i16
+  store i16 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v2i16(ptr %src, ptr %dst) nounwind { ; xor reduction of the minimal two-lane i16 case
+; CHECK-LABEL: vec_reduce_xor_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i16>, ptr %src ; 32-bit load into lane 0 of an LSX register
+  %res = call i16 @llvm.vector.reduce.xor.v2i16(<2 x i16> %v) ; xor the two lanes to a scalar i16
+  store i16 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v4i32(ptr %src, ptr %dst) nounwind { ; xor reduction of a full 128-bit <4 x i32> vector; 2 halving steps
+; CHECK-LABEL: vec_reduce_xor_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <4 x i32>, ptr %src ; load the vector operand from memory
+  %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v) ; xor all 4 lanes down to one scalar i32
+  store i32 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v2i32(ptr %src, ptr %dst) nounwind { ; xor reduction of a 64-bit <2 x i32> vector
+; CHECK-LABEL: vec_reduce_xor_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $a0, $a0, 0
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i32>, ptr %src ; 64-bit load; only the low half of the LSX register is meaningful
+  %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v) ; xor the two lanes to a scalar i32
+  store i32 %res, ptr %dst ; store the scalar result
+  ret void
+}
+
+define void @vec_reduce_xor_v2i64(ptr %src, ptr %dst) nounwind { ; xor reduction of <2 x i64>; single pairwise vxor.v step
+; CHECK-LABEL: vec_reduce_xor_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vld $vr0, $a0, 0
+; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
+; CHECK-NEXT:    ret
+  %v = load <2 x i64>, ptr %src ; load the vector operand from memory
+  %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v) ; xor the two lanes to a scalar i64
+  store i64 %res, ptr %dst ; store the scalar result
+  ret void
+}


        


More information about the llvm-commits mailing list