[llvm] f3aa441 - [LoongArch] Add LSX intrinsic testcases

via llvm-commits <llvm-commits at lists.llvm.org>
Sat Aug 19 02:11:18 PDT 2023


Author: chenli
Date: 2023-08-19T17:10:46+08:00
New Revision: f3aa4416319aed198841401c6c9dc2e49afe2507

URL: https://github.com/llvm/llvm-project/commit/f3aa4416319aed198841401c6c9dc2e49afe2507
DIFF: https://github.com/llvm/llvm-project/commit/f3aa4416319aed198841401c6c9dc2e49afe2507.diff

LOG: [LoongArch] Add LSX intrinsic testcases

Depends on D155829

Reviewed By: SixWeining

Differential Revision: https://reviews.llvm.org/D155834
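
Each added file follows the same shape: it declares one llvm.loongarch.lsx.* intrinsic, calls it from a small wrapper function, and checks that llc selects the matching LSX instruction; the CHECK lines were generated with utils/update_llc_test_checks.py. A representative testcase (the vadd.b case, taken from intrinsic-add.ll in the diff below):

  ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

  declare <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8>, <16 x i8>)

  define <16 x i8> @lsx_vadd_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
  ; CHECK-LABEL: lsx_vadd_b:
  ; CHECK:       # %bb.0: # %entry
  ; CHECK-NEXT:    vadd.b $vr0, $vr0, $vr1
  ; CHECK-NEXT:    ret
  entry:
    %res = call <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8> %va, <16 x i8> %vb)
    ret <16 x i8> %res
  }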

Added: 
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-absd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-add.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-adda.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-addi.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-addw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-and.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-andi.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-andn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-avg.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-avgr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitclr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitrev.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitsel.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitseli.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitset.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsll.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsrl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-clo.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-clz.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-div.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-exth.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-extl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-extrins.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fadd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fclass.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvt.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvth.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvtl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fdiv.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ffint.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-flogb.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmadd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmax.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmaxa.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmin.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmina.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmsub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmul.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmadd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmsub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecip.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-frint.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrt.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-frstp.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsqrt.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ftint.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-haddw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-hsubw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ilv.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-insgr2vr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldi.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-madd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-maddw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-max.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-min.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-mod.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskgez.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskltz.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-msknz.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-msub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-muh.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-mul.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-mulw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-neg.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-nor.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-nori.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-or.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ori.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-orn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-pack.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-pcnt.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-permi.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-pick.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-replve.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-replvei.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-rotr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sadd.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sat.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-seq.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf4i.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-signcov.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sle.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sll.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sllwil.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-slt.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sra.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sran.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srani.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srar.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srl.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srln.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlr.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssran.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrani.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrln.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrn.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrni.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-sub.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-subi.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-subw.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-xor.ll
    llvm/test/CodeGen/LoongArch/lsx/intrinsic-xori.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-absd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-absd.ll
new file mode 100644
index 00000000000000..811d9d712de4e6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-absd.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vabsd.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vabsd_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vabsd.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vabsd.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vabsd_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vabsd.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vabsd.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vabsd_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vabsd.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vabsd.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vabsd_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vabsd.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vabsd.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vabsd_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vabsd.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vabsd.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vabsd_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vabsd.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vabsd.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vabsd_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vabsd.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vabsd.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vabsd_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vabsd_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vabsd.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vabsd.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-add.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-add.ll
new file mode 100644
index 00000000000000..fac16c8308dafb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-add.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vadd_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vadd.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vadd.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vadd_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vadd.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vadd.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vadd_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vadd.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vadd.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vadd_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vadd.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vadd.q(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vadd_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vadd_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadd.q $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vadd.q(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-adda.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-adda.ll
new file mode 100644
index 00000000000000..79be0a184bfb18
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-adda.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vadda.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vadda_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vadda_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadda.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vadda.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vadda.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vadda_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vadda_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadda.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vadda.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vadda.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vadda_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vadda_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadda.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vadda.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vadda.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vadda_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vadda_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vadda.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vadda.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addi.ll
new file mode 100644
index 00000000000000..b9134e0724fe4c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addi.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vaddi.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vaddi_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vaddi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddi.bu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vaddi.bu(<16 x i8> %va, i32 31)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddi.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vaddi_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vaddi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddi.hu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddi.hu(<8 x i16> %va, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddi.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vaddi_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vaddi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddi.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddi.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vaddi_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vaddi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddi.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddi.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addw.ll
new file mode 100644
index 00000000000000..086e3bec12d236
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-addw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwev.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwev_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwev.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwev_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwev_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwev_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwev_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwev_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwev_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwev_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwev_h_bu_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.h.bu.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwev.h.bu.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwev_w_hu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.w.hu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwev.w.hu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwev_d_wu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.d.wu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.d.wu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwev_q_du_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwev.q.du.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwev.q.du.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwod.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwod_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwod.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwod_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwod_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwod_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwod_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwod_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwod_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwod_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vaddwod_h_bu_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.h.bu.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vaddwod.h.bu.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vaddwod_w_hu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.w.hu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vaddwod.w.hu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vaddwod_d_wu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.d.wu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.d.wu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vaddwod_q_du_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vaddwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vaddwod.q.du.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vaddwod.q.du.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-and.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-and.ll
new file mode 100644
index 00000000000000..77496239c3a9f7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-and.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vand.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vand_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vand_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vand.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vand.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andi.ll
new file mode 100644
index 00000000000000..9a1c38a641d056
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andi.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vandi.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vandi_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vandi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vandi.b $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vandi.b(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andn.ll
new file mode 100644
index 00000000000000..b08c759ecc322b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-andn.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vandn.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vandn_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vandn_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vandn.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vandn.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avg.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avg.ll
new file mode 100644
index 00000000000000..fb0861f4cd5eec
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avg.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vavg.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vavg_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vavg.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vavg.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vavg_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vavg.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vavg.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vavg_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vavg.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vavg.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vavg_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vavg.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vavg.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vavg_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vavg.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vavg.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vavg_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vavg.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vavg.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vavg_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vavg.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vavg.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vavg_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vavg_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavg.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vavg.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avgr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avgr.ll
new file mode 100644
index 00000000000000..8bf7d0ed881732
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-avgr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vavgr.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vavgr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vavgr.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vavgr.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vavgr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vavgr.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vavgr.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vavgr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vavgr.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vavgr.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vavgr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vavgr.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vavgr.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vavgr_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vavgr.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vavgr.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vavgr_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vavgr.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vavgr.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vavgr_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vavgr.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vavgr.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vavgr_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vavgr_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vavgr.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vavgr.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitclr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitclr.ll
new file mode 100644
index 00000000000000..f5fba6dbb14143
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitclr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vbitclr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitclr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclr.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitclr.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vbitclr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitclr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclr.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitclr.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vbitclr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitclr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclr.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitclr.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vbitclr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitclr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclr.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitclr.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vbitclri_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vbitclri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclri.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitclri.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vbitclri_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vbitclri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclri.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitclri.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vbitclri_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vbitclri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclri.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitclri.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vbitclri_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vbitclri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitclri.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitclri.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitrev.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitrev.ll
new file mode 100644
index 00000000000000..ad56e88fdb8828
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitrev.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vbitrev_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitrev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrev.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitrev.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vbitrev_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitrev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrev.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitrev.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vbitrev_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitrev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrev.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitrev.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vbitrev_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitrev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrev.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitrev.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vbitrevi_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vbitrevi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrevi.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitrevi.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vbitrevi_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vbitrevi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrevi.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitrevi.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vbitrevi_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vbitrevi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrevi.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitrevi.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vbitrevi_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vbitrevi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitrevi.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitrevi.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitsel.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitsel.ll
new file mode 100644
index 00000000000000..4b4b5ff1fc8cdb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitsel.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitsel.v(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vbitsel_v(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vbitsel_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitsel.v $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitsel.v(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitseli.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitseli.ll
new file mode 100644
index 00000000000000..28d342b5c378fb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitseli.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitseli.b(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vbitseli_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitseli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitseli.b $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitseli.b(<16 x i8> %va, <16 x i8> %vb, i32 255)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitset.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitset.ll
new file mode 100644
index 00000000000000..75d98e6f8bce1a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bitset.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitset.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vbitset_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitset_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitset.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitset.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitset.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vbitset_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitset_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitset.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitset.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitset.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vbitset_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitset_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitset.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitset.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitset.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vbitset_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vbitset_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitset.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitset.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vbitseti.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vbitseti_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vbitseti_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitseti.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbitseti.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vbitseti.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vbitseti_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vbitseti_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitseti.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vbitseti.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vbitseti.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vbitseti_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vbitseti_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitseti.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vbitseti.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vbitseti.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vbitseti_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vbitseti_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbitseti.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vbitseti.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsll.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsll.ll
new file mode 100644
index 00000000000000..e7eb1cfcb40747
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsll.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbsll.v(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vbsll_v(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vbsll_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbsll.v $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbsll.v(<16 x i8> %va, i32 31)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsrl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsrl.ll
new file mode 100644
index 00000000000000..fe0565297641bc
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-bsrl.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vbsrl.v(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vbsrl_v(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vbsrl_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vbsrl.v $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vbsrl.v(<16 x i8> %va, i32 31)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clo.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clo.ll
new file mode 100644
index 00000000000000..c581109f3fd0b5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clo.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vclo.b(<16 x i8>)
+
+define <16 x i8> @lsx_vclo_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vclo_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclo.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vclo.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vclo.h(<8 x i16>)
+
+define <8 x i16> @lsx_vclo_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vclo_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclo.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vclo.h(<8 x i16> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vclo.w(<4 x i32>)
+
+define <4 x i32> @lsx_vclo_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vclo_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclo.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vclo.w(<4 x i32> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vclo.d(<2 x i64>)
+
+define <2 x i64> @lsx_vclo_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vclo_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclo.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vclo.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clz.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clz.ll
new file mode 100644
index 00000000000000..25c37b64349b35
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-clz.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vclz.b(<16 x i8>)
+
+define <16 x i8> @lsx_vclz_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vclz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclz.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vclz.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vclz.h(<8 x i16>)
+
+define <8 x i16> @lsx_vclz_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vclz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclz.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vclz.h(<8 x i16> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vclz.w(<4 x i32>)
+
+define <4 x i32> @lsx_vclz_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vclz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclz.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vclz.w(<4 x i32> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vclz.d(<2 x i64>)
+
+define <2 x i64> @lsx_vclz_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vclz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vclz.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vclz.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-div.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-div.ll
new file mode 100644
index 00000000000000..53166e84d269a3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-div.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vdiv.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vdiv_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vdiv.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vdiv.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vdiv_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vdiv.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vdiv.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vdiv_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vdiv.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vdiv.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vdiv_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vdiv.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vdiv.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vdiv_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vdiv.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vdiv.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vdiv_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vdiv.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vdiv.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vdiv_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vdiv.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vdiv.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vdiv_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vdiv_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vdiv.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vdiv.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-exth.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-exth.ll
new file mode 100644
index 00000000000000..2f3e891a9eef2a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-exth.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8>)
+
+define <8 x i16> @lsx_vexth_h_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.h.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vexth.h.b(<16 x i8> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16>)
+
+define <4 x i32> @lsx_vexth_w_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.w.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vexth.w.h(<8 x i16> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32>)
+
+define <2 x i64> @lsx_vexth_d_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.d.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.d.w(<4 x i32> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64>)
+
+define <2 x i64> @lsx_vexth_q_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.q.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.q.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8>)
+
+define <8 x i16> @lsx_vexth_hu_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.hu.bu $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vexth.hu.bu(<16 x i8> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16>)
+
+define <4 x i32> @lsx_vexth_wu_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.wu.hu $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vexth.wu.hu(<8 x i16> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32>)
+
+define <2 x i64> @lsx_vexth_du_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.du.wu $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.du.wu(<4 x i32> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64>)
+
+define <2 x i64> @lsx_vexth_qu_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vexth_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vexth.qu.du $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vexth.qu.du(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extl.ll
new file mode 100644
index 00000000000000..cbf19e2a391905
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extl.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.loongarch.lsx.vextl.q.d(<2 x i64>)
+
+define <2 x i64> @lsx_vextl_q_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vextl_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextl.q.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vextl.q.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vextl.qu.du(<2 x i64>)
+
+define <2 x i64> @lsx_vextl_qu_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vextl_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextl.qu.du $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vextl.qu.du(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extrins.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extrins.ll
new file mode 100644
index 00000000000000..8f03a2b812917e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-extrins.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vextrins.b(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vextrins_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vextrins_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextrins.b $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vextrins.b(<16 x i8> %va, <16 x i8> %vb, i32 255)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vextrins.h(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vextrins_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vextrins_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextrins.h $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vextrins.h(<8 x i16> %va, <8 x i16> %vb, i32 255)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vextrins.w(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vextrins_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vextrins_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextrins.w $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vextrins.w(<4 x i32> %va, <4 x i32> %vb, i32 255)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vextrins.d(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vextrins_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vextrins_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextrins.d $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vextrins.d(<2 x i64> %va, <2 x i64> %vb, i32 255)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fadd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fadd.ll
new file mode 100644
index 00000000000000..569002314c9292
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfadd.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfadd_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfadd.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfadd.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfadd.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfadd_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfadd.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fclass.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fclass.ll
new file mode 100644
index 00000000000000..0c668218710174
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fclass.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x i32> @llvm.loongarch.lsx.vfclass.s(<4 x float>)
+
+define <4 x i32> @lsx_vfclass_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfclass_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfclass.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfclass.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfclass.d(<2 x double>)
+
+define <2 x i64> @lsx_vfclass_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfclass_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfclass.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfclass.d(<2 x double> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll
new file mode 100644
index 00000000000000..669c53b73b16fe
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll
@@ -0,0 +1,530 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.caf.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_caf_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_caf_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.caf.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.caf.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.caf.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_caf_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_caf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.caf.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.caf.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cun.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cun_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cun_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cun.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cun.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cun.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cun_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cun_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cun.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cun.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.ceq.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_ceq_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_ceq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.ceq.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.ceq.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.ceq.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_ceq_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_ceq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.ceq.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.ceq.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cueq.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cueq_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cueq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cueq.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cueq.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cueq.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cueq_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cueq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cueq.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cueq.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.clt.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_clt_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_clt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.clt.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.clt.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.clt.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_clt_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_clt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.clt.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.clt.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cult.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cult_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cult_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cult.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cult.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cult.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cult_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cult_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cult.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cult.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cle.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cle_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cle_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cle.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cle.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cle.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cle_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cle.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cle.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cule.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cule_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cule_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cule.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cule.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cule.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cule_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cule_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cule.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cule.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cne.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cne_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cne.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cne.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cne.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cne_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cne.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cne.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cor.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cor_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cor_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cor.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cor.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cor.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cor_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cor_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cor.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cor.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.cune.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_cune_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cune_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cune.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.cune.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.cune.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_cune_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_cune_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.cune.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.cune.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.saf.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_saf_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_saf_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.saf.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.saf.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.saf.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_saf_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_saf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.saf.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.saf.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sun.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sun_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sun_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sun.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sun.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sun.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sun_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sun_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sun.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sun.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.seq.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_seq_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_seq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.seq.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.seq.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.seq.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_seq_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_seq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.seq.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.seq.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sueq.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sueq_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sueq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sueq.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sueq.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sueq.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sueq_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sueq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sueq.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sueq.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.slt.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_slt_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_slt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.slt.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.slt.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.slt.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_slt_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_slt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.slt.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.slt.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sult.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sult_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sult_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sult.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sult.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sult.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sult_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sult_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sult.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sult.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sle.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sle_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sle_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sle.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sle.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sle.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sle_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sle.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sle.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sule.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sule_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sule_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sule.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sule.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sule.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sule_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sule_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sule.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sule.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sne.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sne_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sne.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sne.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sne.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sne_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sne.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sne.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sor.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sor_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sor_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sor.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sor.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sor.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sor_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sor_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sor.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sor.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vfcmp.sune.s(<4 x float>, <4 x float>)
+
+define <4 x i32> @lsx_vfcmp_sune_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sune_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sune.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vfcmp.sune.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vfcmp.sune.d(<2 x double>, <2 x double>)
+
+define <2 x i64> @lsx_vfcmp_sune_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcmp_sune_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcmp.sune.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vfcmp.sune.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvt.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvt.ll
new file mode 100644
index 00000000000000..a6a151a96d84e7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vfcvt.h.s(<4 x float>, <4 x float>)
+
+define <8 x i16> @lsx_vfcvt_h_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcvt_h_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvt.h.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vfcvt.h.s(<4 x float> %va, <4 x float> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vfcvt.s.d(<2 x double>, <2 x double>)
+
+define <4 x float> @lsx_vfcvt_s_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfcvt_s_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvt.s.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfcvt.s.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x float> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvth.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvth.ll
new file mode 100644
index 00000000000000..a9e4328bd011db
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvth.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfcvth.s.h(<8 x i16>)
+
+define <4 x float> @lsx_vfcvth_s_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vfcvth_s_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvth.s.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfcvth.s.h(<8 x i16> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfcvth.d.s(<4 x float>)
+
+define <2 x double> @lsx_vfcvth_d_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfcvth_d_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvth.d.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfcvth.d.s(<4 x float> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvtl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvtl.ll
new file mode 100644
index 00000000000000..9a69964bb22741
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcvtl.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfcvtl.s.h(<8 x i16>)
+
+define <4 x float> @lsx_vfcvtl_s_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vfcvtl_s_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvtl.s.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfcvtl.s.h(<8 x i16> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfcvtl.d.s(<4 x float>)
+
+define <2 x double> @lsx_vfcvtl_d_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfcvtl_d_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcvtl.d.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfcvtl.d.s(<4 x float> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fdiv.ll
new file mode 100644
index 00000000000000..1ca8e5e2c0e9c4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fdiv.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfdiv.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfdiv_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfdiv_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfdiv.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfdiv.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfdiv.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfdiv_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfdiv_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfdiv.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfdiv.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ffint.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ffint.ll
new file mode 100644
index 00000000000000..62fbcfa339cda6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ffint.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vffint.s.w(<4 x i32>)
+
+define <4 x float> @lsx_vffint_s_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vffint_s_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffint.s.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vffint.s.w(<4 x i32> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vffint.d.l(<2 x i64>)
+
+define <2 x double> @lsx_vffint_d_l(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vffint_d_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffint.d.l $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vffint.d.l(<2 x i64> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vffint.s.wu(<4 x i32>)
+
+define <4 x float> @lsx_vffint_s_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vffint_s_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffint.s.wu $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vffint.s.wu(<4 x i32> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vffint.d.lu(<2 x i64>)
+
+define <2 x double> @lsx_vffint_d_lu(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vffint_d_lu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffint.d.lu $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vffint.d.lu(<2 x i64> %va)
+  ret <2 x double> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vffintl.d.w(<4 x i32>)
+
+define <2 x double> @lsx_vffintl_d_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vffintl_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffintl.d.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vffintl.d.w(<4 x i32> %va)
+  ret <2 x double> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vffinth.d.w(<4 x i32>)
+
+define <2 x double> @lsx_vffinth_d_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vffinth_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffinth.d.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vffinth.d.w(<4 x i32> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vffint.s.l(<2 x i64>, <2 x i64>)
+
+define <4 x float> @lsx_vffint_s_l(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vffint_s_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vffint.s.l $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vffint.s.l(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x float> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-flogb.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-flogb.ll
new file mode 100644
index 00000000000000..d8382acc70ed68
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-flogb.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vflogb.s(<4 x float>)
+
+define <4 x float> @lsx_vflogb_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vflogb_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vflogb.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vflogb.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vflogb.d(<2 x double>)
+
+define <2 x double> @lsx_vflogb_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vflogb_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vflogb.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vflogb.d(<2 x double> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmadd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmadd.ll
new file mode 100644
index 00000000000000..adbaf6c76b1b6a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmadd.s(<4 x float>, <4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmadd_s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) nounwind {
+; CHECK-LABEL: lsx_vfmadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmadd.s $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmadd.s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmadd.d(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmadd_d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) nounwind {
+; CHECK-LABEL: lsx_vfmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmadd.d $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmadd.d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmax.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmax.ll
new file mode 100644
index 00000000000000..89f757c4e45679
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmax.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmax.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmax_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmax_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmax.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmax.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmax.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmax_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmax_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmax.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmaxa.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmaxa.ll
new file mode 100644
index 00000000000000..5662acc0b9a143
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmaxa.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmaxa.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmaxa_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmaxa_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmaxa.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmaxa.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmaxa.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmaxa_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmaxa_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmaxa.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmaxa.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmin.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmin.ll
new file mode 100644
index 00000000000000..0f844240277fb0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmin.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmin.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmin_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmin_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmin.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmin.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmin.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmin_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmin_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmin.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmina.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmina.ll
new file mode 100644
index 00000000000000..27f70b5fba3229
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmina.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmina.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmina_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmina_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmina.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmina.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmina.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmina_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmina_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmina.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmina.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmsub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmsub.ll
new file mode 100644
index 00000000000000..856ca9cadbd905
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmsub.s(<4 x float>, <4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmsub_s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) nounwind {
+; CHECK-LABEL: lsx_vfmsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmsub.s $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmsub.s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmsub.d(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmsub_d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) nounwind {
+; CHECK-LABEL: lsx_vfmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmsub.d $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmsub.d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmul.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmul.ll
new file mode 100644
index 00000000000000..1e6c4c77d536b4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fmul.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfmul.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfmul_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmul_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmul.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfmul.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfmul.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfmul_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfmul_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfmul.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfmul.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmadd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmadd.ll
new file mode 100644
index 00000000000000..e1a9ea78ef9db5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfnmadd.s(<4 x float>, <4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfnmadd_s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) nounwind {
+; CHECK-LABEL: lsx_vfnmadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfnmadd.s $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfnmadd.s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfnmadd.d(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfnmadd_d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) nounwind {
+; CHECK-LABEL: lsx_vfnmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfnmadd.d $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfnmadd.d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmsub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmsub.ll
new file mode 100644
index 00000000000000..46db0f4a50613a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fnmsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfnmsub.s(<4 x float>, <4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfnmsub_s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) nounwind {
+; CHECK-LABEL: lsx_vfnmsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfnmsub.s $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfnmsub.s(<4 x float> %va, <4 x float> %vb, <4 x float> %vc)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfnmsub.d(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfnmsub_d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) nounwind {
+; CHECK-LABEL: lsx_vfnmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfnmsub.d $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfnmsub.d(<2 x double> %va, <2 x double> %vb, <2 x double> %vc)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecip.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecip.ll
new file mode 100644
index 00000000000000..669fde5912d4b9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecip.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfrecip.s(<4 x float>)
+
+define <4 x float> @lsx_vfrecip_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrecip_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrecip.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrecip.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrecip.d(<2 x double>)
+
+define <2 x double> @lsx_vfrecip_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrecip_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrecip.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrecip.d(<2 x double> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frint.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frint.ll
new file mode 100644
index 00000000000000..8d872fc7296255
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frint.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfrintrne.s(<4 x float>)
+
+define <4 x float> @lsx_vfrintrne_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrne.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrintrne.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrintrne.d(<2 x double>)
+
+define <2 x double> @lsx_vfrintrne_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrne.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrintrne.d(<2 x double> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vfrintrz.s(<4 x float>)
+
+define <4 x float> @lsx_vfrintrz_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrz_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrz.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrintrz.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrintrz.d(<2 x double>)
+
+define <2 x double> @lsx_vfrintrz_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrz.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrintrz.d(<2 x double> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vfrintrp.s(<4 x float>)
+
+define <4 x float> @lsx_vfrintrp_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrp_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrp.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrintrp.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrintrp.d(<2 x double>)
+
+define <2 x double> @lsx_vfrintrp_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrp_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrp.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrintrp.d(<2 x double> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vfrintrm.s(<4 x float>)
+
+define <4 x float> @lsx_vfrintrm_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrm_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrm.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrintrm.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrintrm.d(<2 x double>)
+
+define <2 x double> @lsx_vfrintrm_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrintrm_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrintrm.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrintrm.d(<2 x double> %va)
+  ret <2 x double> %res
+}
+
+declare <4 x float> @llvm.loongarch.lsx.vfrint.s(<4 x float>)
+
+define <4 x float> @lsx_vfrint_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrint_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrint.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrint.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrint.d(<2 x double>)
+
+define <2 x double> @lsx_vfrint_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrint_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrint.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrint.d(<2 x double> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrt.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrt.ll
new file mode 100644
index 00000000000000..326d87308b0ba6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfrsqrt.s(<4 x float>)
+
+define <4 x float> @lsx_vfrsqrt_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfrsqrt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrsqrt.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfrsqrt.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfrsqrt.d(<2 x double>)
+
+define <2 x double> @lsx_vfrsqrt_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfrsqrt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrsqrt.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfrsqrt.d(<2 x double> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frstp.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frstp.ll
new file mode 100644
index 00000000000000..5c072b194d4fef
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frstp.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vfrstp.b(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vfrstp_b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vfrstp_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrstp.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vfrstp.b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vfrstp.h(<8 x i16>, <8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vfrstp_h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vfrstp_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrstp.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vfrstp.h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <8 x i16> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vfrstpi.b(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vfrstpi_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vfrstpi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrstpi.b $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vfrstpi.b(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vfrstpi.h(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vfrstpi_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vfrstpi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfrstpi.h $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vfrstpi.h(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsqrt.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsqrt.ll
new file mode 100644
index 00000000000000..55bffba9e99e9c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsqrt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfsqrt.s(<4 x float>)
+
+define <4 x float> @lsx_vfsqrt_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vfsqrt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfsqrt.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfsqrt.s(<4 x float> %va)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfsqrt.d(<2 x double>)
+
+define <2 x double> @lsx_vfsqrt_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vfsqrt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfsqrt.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfsqrt.d(<2 x double> %va)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsub.ll
new file mode 100644
index 00000000000000..2beba4a70dc960
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.loongarch.lsx.vfsub.s(<4 x float>, <4 x float>)
+
+define <4 x float> @lsx_vfsub_s(<4 x float> %va, <4 x float> %vb) nounwind {
+; CHECK-LABEL: lsx_vfsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfsub.s $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x float> @llvm.loongarch.lsx.vfsub.s(<4 x float> %va, <4 x float> %vb)
+  ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.loongarch.lsx.vfsub.d(<2 x double>, <2 x double>)
+
+define <2 x double> @lsx_vfsub_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vfsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x double> @llvm.loongarch.lsx.vfsub.d(<2 x double> %va, <2 x double> %vb)
+  ret <2 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ftint.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ftint.ll
new file mode 100644
index 00000000000000..2a494cd7fa874b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ftint.ll
@@ -0,0 +1,350 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrne.w.s(<4 x float>)
+
+define <4 x i32> @lsx_vftintrne_w_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrne_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrne.w.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrne.l.d(<2 x double>)
+
+define <2 x i64> @lsx_vftintrne_l_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrne_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrne.l.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrne.l.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrz.w.s(<4 x float>)
+
+define <4 x i32> @lsx_vftintrz_w_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrz_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrz.w.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrz.l.d(<2 x double>)
+
+define <2 x i64> @lsx_vftintrz_l_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrz_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrz.l.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrz.l.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrp.w.s(<4 x float>)
+
+define <4 x i32> @lsx_vftintrp_w_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrp_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrp.w.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrp.l.d(<2 x double>)
+
+define <2 x i64> @lsx_vftintrp_l_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrp_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrp.l.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrp.l.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrm.w.s(<4 x float>)
+
+define <4 x i32> @lsx_vftintrm_w_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrm_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrm.w.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrm.l.d(<2 x double>)
+
+define <2 x i64> @lsx_vftintrm_l_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrm_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrm.l.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrm.l.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftint.w.s(<4 x float>)
+
+define <4 x i32> @lsx_vftint_w_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftint_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftint.w.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftint.w.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftint.l.d(<2 x double>)
+
+define <2 x i64> @lsx_vftint_l_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftint_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftint.l.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftint.l.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrz.wu.s(<4 x float>)
+
+define <4 x i32> @lsx_vftintrz_wu_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrz_wu_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrz.wu.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrz.wu.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrz.lu.d(<2 x double>)
+
+define <2 x i64> @lsx_vftintrz_lu_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrz_lu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrz.lu.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrz.lu.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftint.wu.s(<4 x float>)
+
+define <4 x i32> @lsx_vftint_wu_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftint_wu_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftint.wu.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftint.wu.s(<4 x float> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftint.lu.d(<2 x double>)
+
+define <2 x i64> @lsx_vftint_lu_d(<2 x double> %va) nounwind {
+; CHECK-LABEL: lsx_vftint_lu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftint.lu.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftint.lu.d(<2 x double> %va)
+  ret <2 x i64> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrne.w.d(<2 x double>, <2 x double>)
+
+define <4 x i32> @lsx_vftintrne_w_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vftintrne_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrne.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrne.w.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrz.w.d(<2 x double>, <2 x double>)
+
+define <4 x i32> @lsx_vftintrz_w_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vftintrz_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrz.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrz.w.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrp.w.d(<2 x double>, <2 x double>)
+
+define <4 x i32> @lsx_vftintrp_w_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vftintrp_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrp.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrp.w.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftintrm.w.d(<2 x double>, <2 x double>)
+
+define <4 x i32> @lsx_vftintrm_w_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vftintrm_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrm.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftintrm.w.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vftint.w.d(<2 x double>, <2 x double>)
+
+define <4 x i32> @lsx_vftint_w_d(<2 x double> %va, <2 x double> %vb) nounwind {
+; CHECK-LABEL: lsx_vftint_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftint.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vftint.w.d(<2 x double> %va, <2 x double> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrnel.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrnel_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrnel_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrnel.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrnel.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrneh.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrneh_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrneh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrneh.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrneh.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrzl.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrzl_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrzl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrzl.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrzl.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrzh.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrzh_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrzh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrzh.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrzh.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrpl.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrpl_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrpl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrpl.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrpl.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrph.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrph_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrph_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrph.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrph.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrml.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrml_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrml_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrml.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrml.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintrmh.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintrmh_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintrmh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintrmh.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintrmh.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftintl.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftintl_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftintl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftintl.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftintl.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vftinth.l.s(<4 x float>)
+
+define <2 x i64> @lsx_vftinth_l_s(<4 x float> %va) nounwind {
+; CHECK-LABEL: lsx_vftinth_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vftinth.l.s $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vftinth.l.s(<4 x float> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-haddw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-haddw.ll
new file mode 100644
index 00000000000000..05725582334ae3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-haddw.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vhaddw.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vhaddw_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vhaddw.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vhaddw.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vhaddw_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vhaddw.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhaddw.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vhaddw_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhaddw.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhaddw.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vhaddw_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhaddw.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vhaddw.hu.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vhaddw_hu_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.hu.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vhaddw.hu.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vhaddw.wu.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vhaddw_wu_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.wu.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vhaddw.wu.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhaddw.du.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vhaddw_du_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.du.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhaddw.du.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhaddw.qu.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vhaddw_qu_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vhaddw_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhaddw.qu.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhaddw.qu.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-hsubw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-hsubw.ll
new file mode 100644
index 00000000000000..dd5815b2ea85a6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-hsubw.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vhsubw.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vhsubw_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vhsubw.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vhsubw.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vhsubw_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vhsubw.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhsubw.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vhsubw_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhsubw.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhsubw.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vhsubw_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhsubw.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vhsubw.hu.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vhsubw_hu_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.hu.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vhsubw.hu.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vhsubw.wu.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vhsubw_wu_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.wu.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vhsubw.wu.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhsubw.du.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vhsubw_du_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.du.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhsubw.du.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vhsubw.qu.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vhsubw_qu_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vhsubw_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vhsubw.qu.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vhsubw.qu.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ilv.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ilv.ll
new file mode 100644
index 00000000000000..77b0b3484df8c4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ilv.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vilvl.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vilvl_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvl.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vilvl.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vilvl.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vilvl_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvl.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vilvl.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vilvl.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vilvl_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvl.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vilvl.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vilvl.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vilvl_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvl.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vilvl.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vilvh.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vilvh_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvh_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvh.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vilvh.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vilvh.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vilvh_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvh_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvh.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vilvh.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vilvh.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vilvh_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvh_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvh.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vilvh.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vilvh.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vilvh_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vilvh_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vilvh.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vilvh.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-insgr2vr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-insgr2vr.ll
new file mode 100644
index 00000000000000..61d2cbd2806646
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-insgr2vr.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vinsgr2vr.b(<16 x i8>, i32, i32)
+
+define <16 x i8> @lsx_vinsgr2vr_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vinsgr2vr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    vinsgr2vr.b $vr0, $a0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vinsgr2vr.b(<16 x i8> %va, i32 1, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vinsgr2vr.h(<8 x i16>, i32, i32)
+
+define <8 x i16> @lsx_vinsgr2vr_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vinsgr2vr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vinsgr2vr.h(<8 x i16> %va, i32 1, i32 7)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vinsgr2vr.w(<4 x i32>, i32, i32)
+
+define <4 x i32> @lsx_vinsgr2vr_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vinsgr2vr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vinsgr2vr.w(<4 x i32> %va, i32 1, i32 3)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64>, i64, i32)
+
+define <2 x i64> @lsx_vinsgr2vr_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vinsgr2vr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64> %va, i64 1, i32 1)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
new file mode 100644
index 00000000000000..b9e2ff8088d834
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ld.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vld(i8*, i32)
+
+define <16 x i8> @lsx_vld(i8* %p) nounwind {
+; CHECK-LABEL: lsx_vld:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vld(i8* %p, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vldx(i8*, i64)
+
+define <16 x i8> @lsx_vldx(i8* %p, i64 %b) nounwind {
+; CHECK-LABEL: lsx_vldx:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldx $vr0, $a0, $a1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldx(i8* %p, i64 %b)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldi.ll
new file mode 100644
index 00000000000000..ace910b54d9a6b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldi.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.loongarch.lsx.vldi(i32)
+
+define <2 x i64> @lsx_vldi() nounwind {
+; CHECK-LABEL: lsx_vldi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldi $vr0, 4095
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldi(i32 4095)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vrepli.b(i32)
+
+define <16 x i8> @lsx_vrepli_b() nounwind {
+; CHECK-LABEL: lsx_vrepli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrepli.b $vr0, 511
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vrepli.b(i32 511)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vrepli.h(i32)
+
+define <8 x i16> @lsx_vrepli_h() nounwind {
+; CHECK-LABEL: lsx_vrepli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrepli.h $vr0, 511
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vrepli.h(i32 511)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vrepli.w(i32)
+
+define <4 x i32> @lsx_vrepli_w() nounwind {
+; CHECK-LABEL: lsx_vrepli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrepli.w $vr0, 511
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vrepli.w(i32 511)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vrepli.d(i32)
+
+define <2 x i64> @lsx_vrepli_d() nounwind {
+; CHECK-LABEL: lsx_vrepli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrepli.d $vr0, 511
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vrepli.d(i32 511)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
new file mode 100644
index 00000000000000..1a9cf3d3a7665d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ldrepl.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8*, i32)
+
+define <16 x i8> @lsx_vldrepl_b(i8* %p, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vldrepl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldrepl.b $vr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vldrepl.b(i8* %p, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8*, i32)
+
+define <8 x i16> @lsx_vldrepl_h(i8* %p, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vldrepl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldrepl.h $vr0, $a0, 2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vldrepl.h(i8* %p, i32 2)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8*, i32)
+
+define <4 x i32> @lsx_vldrepl_w(i8* %p, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vldrepl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldrepl.w $vr0, $a0, 4
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vldrepl.w(i8* %p, i32 4)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8*, i32)
+
+define <2 x i64> @lsx_vldrepl_d(i8* %p, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vldrepl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vldrepl.d $vr0, $a0, 8
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vldrepl.d(i8* %p, i32 8)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-madd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-madd.ll
new file mode 100644
index 00000000000000..89503724fd730e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-madd.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmadd.b(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmadd_b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmadd.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmadd.b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmadd.h(<8 x i16>, <8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmadd_h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmadd.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmadd.h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmadd.w(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmadd_w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmadd.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmadd.w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmadd.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmadd_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmadd.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmadd.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-maddw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-maddw.ll
new file mode 100644
index 00000000000000..1e3ab25a5fcf1a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-maddw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwev_h_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.h.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwev_w_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.w.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwev_d_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.d.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwev_q_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.q.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwev_h_bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.h.bu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwev_w_hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.w.hu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwev_d_wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.d.wu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwev_q_du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.q.du $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwev_h_bu_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.h.bu.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwev.h.bu.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwev_w_hu_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.w.hu.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwev.w.hu.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwev_d_wu_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.d.wu.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.d.wu.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwev_q_du_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwev.q.du.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwev.q.du.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwod_h_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.h.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwod_w_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.w.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwod_d_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.d.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwod_q_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.q.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwod_h_bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.h.bu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwod_w_hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.w.hu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwod_d_wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.d.wu $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwod_q_du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.q.du $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmaddwod_h_bu_b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.h.bu.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaddwod.h.bu.b(<8 x i16> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmaddwod_w_hu_h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.w.hu.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaddwod.w.hu.h(<4 x i32> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmaddwod_d_wu_w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.d.wu.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.d.wu.w(<2 x i64> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmaddwod_q_du_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmaddwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaddwod.q.du.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaddwod.q.du.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max.ll
new file mode 100644
index 00000000000000..4dd289cf6ed720
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmax.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmax_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmax.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmax.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmax_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmax.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmax.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmax_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmax.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmax.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmax_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmax.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmaxi.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vmaxi_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.b $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmaxi.b(<16 x i8> %va, i32 -16)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaxi.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vmaxi_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.h $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaxi.h(<8 x i16> %va, i32 -16)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaxi.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vmaxi_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.w $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaxi.w(<4 x i32> %va, i32 15)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaxi.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vmaxi_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.d $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaxi.d(<2 x i64> %va, i32 15)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmax.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmax_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmax.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmax.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmax_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmax.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmax.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmax_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmax.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmax.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmax_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmax_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmax.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmaxi.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vmaxi_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.bu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmaxi.bu(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmaxi.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vmaxi_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.hu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmaxi.hu(<8 x i16> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmaxi.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vmaxi_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmaxi.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmaxi.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vmaxi_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vmaxi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmaxi.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmaxi.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min.ll
new file mode 100644
index 00000000000000..aa12a5ead6a3f7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmin.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmin_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmin.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmin.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmin_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmin.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmin.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmin_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmin.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmin.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmin_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmin.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmini.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vmini_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.b $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmini.b(<16 x i8> %va, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmini.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vmini_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmini.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmini.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vmini_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.w $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmini.w(<4 x i32> %va, i32 -16)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmini.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vmini_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.d $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmini.d(<2 x i64> %va, i32 -16)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmin.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmin_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmin.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmin.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmin_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmin.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmin.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmin_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmin.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmin.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmin_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmin_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmin.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmin.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmini.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vmini_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.bu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmini.bu(<16 x i8> %va, i32 31)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmini.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vmini_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.hu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmini.hu(<8 x i16> %va, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmini.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vmini_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmini.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmini.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vmini_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vmini_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmini.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmini.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mod.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mod.ll
new file mode 100644
index 00000000000000..6b3dc6865584e5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mod.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmod.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmod_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmod.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmod.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmod_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmod.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmod.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmod_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmod.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmod.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmod_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmod.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmod.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmod_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmod.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmod.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmod_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmod.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmod.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmod_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmod.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmod.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmod_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmod_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmod.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmod.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskgez.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskgez.ll
new file mode 100644
index 00000000000000..3ecd777aee6785
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskgez.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmskgez.b(<16 x i8>)
+
+define <16 x i8> @lsx_vmskgez_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmskgez_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmskgez.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmskgez.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskltz.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskltz.ll
new file mode 100644
index 00000000000000..be00c76137c770
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mskltz.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmskltz.b(<16 x i8>)
+
+define <16 x i8> @lsx_vmskltz_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmskltz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmskltz.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmskltz.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmskltz.h(<8 x i16>)
+
+define <8 x i16> @lsx_vmskltz_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vmskltz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmskltz.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmskltz.h(<8 x i16> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmskltz.w(<4 x i32>)
+
+define <4 x i32> @lsx_vmskltz_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vmskltz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmskltz.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmskltz.w(<4 x i32> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmskltz.d(<2 x i64>)
+
+define <2 x i64> @lsx_vmskltz_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vmskltz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmskltz.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmskltz.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msknz.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msknz.ll
new file mode 100644
index 00000000000000..02f1752f7190dd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msknz.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmsknz.b(<16 x i8>)
+
+define <16 x i8> @lsx_vmsknz_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vmsknz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmsknz.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmsknz.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msub.ll
new file mode 100644
index 00000000000000..98684e10c78e5b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-msub.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmsub.b(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmsub_b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vmsub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmsub.b $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmsub.b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmsub.h(<8 x i16>, <8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmsub_h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vmsub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmsub.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmsub.h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmsub.w(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmsub_w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vmsub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmsub.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmsub.w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmsub.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmsub_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmsub.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmsub.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-muh.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-muh.ll
new file mode 100644
index 00000000000000..a4deb8f8f823e5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-muh.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmuh.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmuh_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmuh.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmuh.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmuh_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmuh.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmuh.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmuh_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmuh.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmuh.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmuh_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmuh.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vmuh.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmuh_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmuh.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmuh.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmuh_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmuh.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmuh.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmuh_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmuh.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmuh.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmuh_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmuh_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmuh.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmuh.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mul.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mul.ll
new file mode 100644
index 00000000000000..aca60d1663b742
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mul.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vmul.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vmul_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmul_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmul.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vmul.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmul.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vmul_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmul_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmul.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmul.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmul.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vmul_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmul_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmul.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmul.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmul.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmul_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmul_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmul.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmul.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mulw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mulw.ll
new file mode 100644
index 00000000000000..eb55c1f809e3aa
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-mulw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwev.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwev_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwev.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwev_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwev_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwev_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwev_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwev_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwev_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwev_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwev_h_bu_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.h.bu.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwev.h.bu.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwev_w_hu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.w.hu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwev.w.hu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwev_d_wu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.d.wu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.d.wu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwev_q_du_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwev.q.du.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwev.q.du.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwod.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwod_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwod.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwod_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwod_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwod_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwod_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwod_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwod_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwod_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vmulwod_h_bu_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.h.bu.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vmulwod.h.bu.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vmulwod_w_hu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.w.hu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vmulwod.w.hu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vmulwod_d_wu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.d.wu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.d.wu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vmulwod_q_du_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vmulwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmulwod.q.du.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vmulwod.q.du.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-neg.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-neg.ll
new file mode 100644
index 00000000000000..43c6e975761490
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-neg.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vneg.b(<16 x i8>)
+
+define <16 x i8> @lsx_vneg_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vneg_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vneg.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vneg.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vneg.h(<8 x i16>)
+
+define <8 x i16> @lsx_vneg_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vneg_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vneg.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vneg.h(<8 x i16> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vneg.w(<4 x i32>)
+
+define <4 x i32> @lsx_vneg_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vneg_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vneg.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vneg.w(<4 x i32> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vneg.d(<2 x i64>)
+
+define <2 x i64> @lsx_vneg_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vneg_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vneg.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vneg.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nor.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nor.ll
new file mode 100644
index 00000000000000..16619225f2d178
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nor.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vnor.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vnor_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vnor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vnor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vnor.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nori.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nori.ll
new file mode 100644
index 00000000000000..c2388a1e0da377
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-nori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vnori.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vnori_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vnori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vnori.b $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vnori.b(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-or.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-or.ll
new file mode 100644
index 00000000000000..ab557003d1504a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-or.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vor.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vor_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vor.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ori.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ori.ll
new file mode 100644
index 00000000000000..85c0f432c54a2e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vori.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vori_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vori.b $vr0, $vr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vori.b(<16 x i8> %va, i32 3)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-orn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-orn.ll
new file mode 100644
index 00000000000000..4528628e02c3c8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-orn.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vorn.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vorn_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vorn_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vorn.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vorn.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pack.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pack.ll
new file mode 100644
index 00000000000000..70a3620d1757ac
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pack.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vpackev.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vpackev_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackev.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vpackev.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vpackev.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vpackev_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackev.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vpackev.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vpackev.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vpackev_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackev.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpackev.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vpackev.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vpackev_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackev.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vpackev.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vpackod.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vpackod_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackod.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vpackod.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vpackod.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vpackod_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackod.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vpackod.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vpackod.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vpackod_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackod.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpackod.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vpackod.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vpackod_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vpackod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpackod.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vpackod.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pcnt.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pcnt.ll
new file mode 100644
index 00000000000000..431b270ab0a14f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pcnt.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vpcnt.b(<16 x i8>)
+
+define <16 x i8> @lsx_vpcnt_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vpcnt_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcnt.b $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vpcnt.b(<16 x i8> %va)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vpcnt.h(<8 x i16>)
+
+define <8 x i16> @lsx_vpcnt_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vpcnt_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcnt.h $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vpcnt.h(<8 x i16> %va)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vpcnt.w(<4 x i32>)
+
+define <4 x i32> @lsx_vpcnt_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vpcnt_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcnt.w $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpcnt.w(<4 x i32> %va)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vpcnt.d(<2 x i64>)
+
+define <2 x i64> @lsx_vpcnt_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vpcnt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcnt.d $vr0, $vr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vpcnt.d(<2 x i64> %va)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-permi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-permi.ll
new file mode 100644
index 00000000000000..b8367d98caf660
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-permi.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x i32> @llvm.loongarch.lsx.vpermi.w(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vpermi_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vpermi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpermi.w $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpermi.w(<4 x i32> %va, <4 x i32> %vb, i32 255)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pick.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pick.ll
new file mode 100644
index 00000000000000..4ebf29e1409c08
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pick.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vpickev.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vpickev_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickev.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vpickev.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vpickev.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vpickev_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickev.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vpickev.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vpickev.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vpickev_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickev.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpickev.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vpickev.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vpickev_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickev.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vpickev.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vpickod.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vpickod_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickod.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vpickod.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vpickod.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vpickod_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickod.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vpickod.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vpickod.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vpickod_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickod.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vpickod.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vpickod.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vpickod_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vpickod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickod.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vpickod.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll
new file mode 100644
index 00000000000000..ed56d30ce3c46a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8>, i32)
+
+define i32 @lsx_vpickve2gr_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.b $a0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8> %va, i32 15)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.h(<8 x i16>, i32)
+
+define i32 @lsx_vpickve2gr_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.h $a0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.h(<8 x i16> %va, i32 7)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.w(<4 x i32>, i32)
+
+define i32 @lsx_vpickve2gr_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.w $a0, $vr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.w(<4 x i32> %va, i32 3)
+  ret i32 %res
+}
+
+declare i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64>, i32)
+
+define i64 @lsx_vpickve2gr_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.d $a0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 1)
+  ret i64 %res
+}
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8>, i32)
+
+define i32 @lsx_vpickve2gr_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.bu $a0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8> %va, i32 15)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.hu(<8 x i16>, i32)
+
+define i32 @lsx_vpickve2gr_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.hu $a0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.hu(<8 x i16> %va, i32 7)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32>, i32)
+
+define i32 @lsx_vpickve2gr_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.wu $a0, $vr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32> %va, i32 3)
+  ret i32 %res
+}
+
+declare i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64>, i32)
+
+define i64 @lsx_vpickve2gr_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vpickve2gr_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpickve2gr.du $a0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 1)
+  ret i64 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll
new file mode 100644
index 00000000000000..091f1c98c2289a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32)
+
+define <16 x i8> @lsx_vreplgr2vr_b(i32 %a) nounwind {
+; CHECK-LABEL: lsx_vreplgr2vr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplgr2vr.b $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32 %a)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vreplgr2vr.h(i32)
+
+define <8 x i16> @lsx_vreplgr2vr_h(i32 %a) nounwind {
+; CHECK-LABEL: lsx_vreplgr2vr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplgr2vr.h $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vreplgr2vr.h(i32 %a)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32)
+
+define <4 x i32> @lsx_vreplgr2vr_w(i32 %a) nounwind {
+; CHECK-LABEL: lsx_vreplgr2vr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplgr2vr.w $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32 %a)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64)
+
+define <2 x i64> @lsx_vreplgr2vr_d(i64 %a) nounwind {
+; CHECK-LABEL: lsx_vreplgr2vr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplgr2vr.d $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 %a)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replve.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replve.ll
new file mode 100644
index 00000000000000..3ba184dad052b9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replve.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vreplve.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vreplve_b(<16 x i8> %va, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vreplve_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplve.b $vr0, $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vreplve.b(<16 x i8> %va, i32 %b)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vreplve.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vreplve_h(<8 x i16> %va, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vreplve_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplve.h $vr0, $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vreplve.h(<8 x i16> %va, i32 %b)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vreplve.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vreplve_w(<4 x i32> %va, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vreplve_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplve.w $vr0, $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vreplve.w(<4 x i32> %va, i32 %b)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vreplve.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vreplve_d(<2 x i64> %va, i32 %b) nounwind {
+; CHECK-LABEL: lsx_vreplve_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplve.d $vr0, $vr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vreplve.d(<2 x i64> %va, i32 %b)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replvei.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replvei.ll
new file mode 100644
index 00000000000000..9b8af1878cb83d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replvei.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vreplvei.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vreplvei_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vreplvei_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplvei.b $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vreplvei.b(<16 x i8> %va, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vreplvei.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vreplvei_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vreplvei_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplvei.h $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vreplvei.h(<8 x i16> %va, i32 7)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vreplvei.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vreplvei_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vreplvei_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vreplvei.w(<4 x i32> %va, i32 3)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vreplvei.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vreplvei_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vreplvei_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vreplvei.d $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vreplvei.d(<2 x i64> %va, i32 1)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-rotr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-rotr.ll
new file mode 100644
index 00000000000000..df8650677147b6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-rotr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vrotr.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vrotr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vrotr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotr.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vrotr.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vrotr.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vrotr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vrotr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotr.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vrotr.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vrotr.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vrotr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vrotr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotr.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vrotr.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vrotr.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vrotr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vrotr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotr.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vrotr.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vrotri.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vrotri_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vrotri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotri.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vrotri.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vrotri.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vrotri_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vrotri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotri.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vrotri.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vrotri.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vrotri_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vrotri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotri.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vrotri.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vrotri.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vrotri_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vrotri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vrotri.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vrotri.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sadd.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sadd.ll
new file mode 100644
index 00000000000000..a54f955766dfe6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sadd.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsadd.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsadd_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsadd.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsadd.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsadd_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsadd.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsadd.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsadd_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsadd.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsadd.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsadd_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsadd.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsadd.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsadd_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsadd.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsadd.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsadd_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsadd.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsadd.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsadd_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsadd.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsadd.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsadd_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsadd_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsadd.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsadd.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sat.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sat.ll
new file mode 100644
index 00000000000000..4286842a63b98e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sat.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsat.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsat_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.b $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsat.b(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsat.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsat_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.h $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsat.h(<8 x i16> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsat.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsat_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.w $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsat.w(<4 x i32> %va, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsat.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsat_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.d $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsat.d(<2 x i64> %va, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsat.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsat_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.bu $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsat.bu(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsat.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsat_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.hu $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsat.hu(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsat.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsat_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsat.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsat.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsat_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsat_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsat.du $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsat.du(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-seq.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-seq.ll
new file mode 100644
index 00000000000000..3cb4acd824393b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-seq.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vseq_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vseq_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseq.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vseq.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vseq_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vseq_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseq.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vseq.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vseq_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vseq_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseq.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vseq.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vseq_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vseq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseq.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vseq.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vseqi_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vseqi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseqi.b $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vseqi.b(<16 x i8> %va, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vseqi_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vseqi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseqi.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vseqi.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vseqi_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vseqi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseqi.w $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vseqi.w(<4 x i32> %va, i32 -16)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vseqi_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vseqi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseqi.d $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vseqi.d(<2 x i64> %va, i32 -16)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll
new file mode 100644
index 00000000000000..3188fb4e2c2ef7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lsx.bz.v(<16 x i8>)
+
+define i32 @lsx_bz_v(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_bz_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vseteqz.v $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bz.v(<16 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bnz.v(<16 x i8>)
+
+define i32 @lsx_bnz_v(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_bnz_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetnez.v $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bnz.v(<16 x i8> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll
new file mode 100644
index 00000000000000..22e01922e87bb3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lsx.bnz.b(<16 x i8>)
+
+define i32 @lsx_bnz_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_bnz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetallnez.b $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bnz.b(<16 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bnz.h(<8 x i16>)
+
+define i32 @lsx_bnz_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_bnz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetallnez.h $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bnz.h(<8 x i16> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bnz.w(<4 x i32>)
+
+define i32 @lsx_bnz_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_bnz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetallnez.w $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB2_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bnz.w(<4 x i32> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bnz.d(<2 x i64>)
+
+define i32 @lsx_bnz_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_bnz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetallnez.d $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB3_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bnz.d(<2 x i64> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll
new file mode 100644
index 00000000000000..96c79c10e46889
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lsx.bz.b(<16 x i8>)
+
+define i32 @lsx_bz_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_bz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetanyeqz.b $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bz.b(<16 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bz.h(<8 x i16>)
+
+define i32 @lsx_bz_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_bz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetanyeqz.h $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bz.h(<8 x i16> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bz.w(<4 x i32>)
+
+define i32 @lsx_bz_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_bz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetanyeqz.w $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB2_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bz.w(<4 x i32> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lsx.bz.d(<2 x i64>)
+
+define i32 @lsx_bz_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_bz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetanyeqz.d $fcc0, $vr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB3_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lsx.bz.d(<2 x i64> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf.ll
new file mode 100644
index 00000000000000..f5d516521e45f1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vshuf.b(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vshuf_b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc) nounwind {
+; CHECK-LABEL: lsx_vshuf_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf.b $vr0, $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vshuf.b(<16 x i8> %va, <16 x i8> %vb, <16 x i8> %vc)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vshuf.h(<8 x i16>, <8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vshuf_h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc) nounwind {
+; CHECK-LABEL: lsx_vshuf_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf.h $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vshuf.h(<8 x i16> %va, <8 x i16> %vb, <8 x i16> %vc)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vshuf.w(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vshuf_w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc) nounwind {
+; CHECK-LABEL: lsx_vshuf_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf.w $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vshuf.w(<4 x i32> %va, <4 x i32> %vb, <4 x i32> %vc)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vshuf.d(<2 x i64>, <2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vshuf_d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc) nounwind {
+; CHECK-LABEL: lsx_vshuf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf.d $vr0, $vr1, $vr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vshuf.d(<2 x i64> %va, <2 x i64> %vb, <2 x i64> %vc)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf4i.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf4i.ll
new file mode 100644
index 00000000000000..1ad5f2af5591e5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-shuf4i.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vshuf4i.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vshuf4i_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vshuf4i_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf4i.b $vr0, $vr0, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vshuf4i.b(<16 x i8> %va, i32 255)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vshuf4i.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vshuf4i_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vshuf4i_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf4i.h $vr0, $vr0, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vshuf4i.h(<8 x i16> %va, i32 255)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vshuf4i.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vshuf4i_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vshuf4i_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf4i.w $vr0, $vr0, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vshuf4i.w(<4 x i32> %va, i32 255)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vshuf4i.d(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vshuf4i_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vshuf4i_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vshuf4i.d $vr0, $vr1, 255
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vshuf4i.d(<2 x i64> %va, <2 x i64> %vb, i32 255)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-signcov.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-signcov.ll
new file mode 100644
index 00000000000000..3997b0cc995c50
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-signcov.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsigncov.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsigncov_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsigncov_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsigncov.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsigncov.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsigncov.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsigncov_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsigncov_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsigncov.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsigncov.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsigncov.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsigncov_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsigncov_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsigncov.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsigncov.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsigncov.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsigncov_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsigncov_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsigncov.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsigncov.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sle.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sle.ll
new file mode 100644
index 00000000000000..5a9d5f06e63f89
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sle.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsle.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsle_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsle.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsle.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsle_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsle.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsle.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsle_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsle.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsle.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsle_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsle.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslei.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vslei_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.b $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslei.b(<16 x i8> %va, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslei.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vslei_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslei.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslei.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vslei_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.w $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslei.w(<4 x i32> %va, i32 -16)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslei.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vslei_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.d $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslei.d(<2 x i64> %va, i32 -16)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsle.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsle_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsle.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsle.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsle_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsle.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsle.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsle_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsle.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsle.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsle_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsle_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsle.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsle.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslei.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vslei_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.bu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslei.bu(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslei.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vslei_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.hu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslei.hu(<8 x i16> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslei.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vslei_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslei.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslei.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vslei_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vslei_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslei.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslei.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sll.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sll.ll
new file mode 100644
index 00000000000000..7bc20af41f17a8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sll.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsll.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsll_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsll_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsll.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsll.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsll.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsll_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsll_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsll.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsll.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsll.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsll_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsll_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsll.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsll.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsll.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsll_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsll_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsll.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsll.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslli.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vslli_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vslli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslli.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslli.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslli.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vslli_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vslli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslli.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslli.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslli.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vslli_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vslli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslli.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslli.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslli.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vslli_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vslli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslli.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslli.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sllwil.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sllwil.ll
new file mode 100644
index 00000000000000..29ab70da1ceda3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sllwil.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vsllwil.h.b(<16 x i8>, i32)
+
+define <8 x i16> @lsx_vsllwil_h_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.h.b $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsllwil.h.b(<16 x i8> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsllwil.w.h(<8 x i16>, i32)
+
+define <4 x i32> @lsx_vsllwil_w_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.w.h $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsllwil.w.h(<8 x i16> %va, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsllwil.d.w(<4 x i32>, i32)
+
+define <2 x i64> @lsx_vsllwil_d_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.d.w $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsllwil.d.w(<4 x i32> %va, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsllwil.hu.bu(<16 x i8>, i32)
+
+define <8 x i16> @lsx_vsllwil_hu_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.hu.bu $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsllwil.hu.bu(<16 x i8> %va, i32 7)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsllwil.wu.hu(<8 x i16>, i32)
+
+define <4 x i32> @lsx_vsllwil_wu_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.wu.hu $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsllwil.wu.hu(<8 x i16> %va, i32 15)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsllwil.du.wu(<4 x i32>, i32)
+
+define <2 x i64> @lsx_vsllwil_du_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsllwil_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsllwil.du.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsllwil.du.wu(<4 x i32> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-slt.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-slt.ll
new file mode 100644
index 00000000000000..18683e9dc46f63
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-slt.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vslt_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslt.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vslt_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslt.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vslt_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslt.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vslt_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslt.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vslti_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.b $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslti.b(<16 x i8> %va, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vslti_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslti.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vslti_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.w $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslti.w(<4 x i32> %va, i32 -16)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vslti_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.d $vr0, $vr0, -16
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslti.d(<2 x i64> %va, i32 -16)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vslt_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslt.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vslt_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslt.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vslt_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslt.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vslt_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vslt_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslt.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslt.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vslti_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.bu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vslti.bu(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vslti_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.hu $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vslti.hu(<8 x i16> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vslti_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vslti.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vslti_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vslti_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslti.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vslti.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sra.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sra.ll
new file mode 100644
index 00000000000000..e85c8464c18e17
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sra.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsra.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsra_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsra_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsra.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsra.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsra.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsra_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsra_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsra.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsra.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsra.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsra_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsra_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsra.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsra.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsra.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsra_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsra_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsra.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsra.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrai.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrai_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsrai_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrai.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrai.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrai_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsrai_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrai.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrai.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrai_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsrai_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrai.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrai.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrai_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsrai_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrai.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sran.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sran.ll
new file mode 100644
index 00000000000000..4ffe5a704c2c88
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sran.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsran.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vsran_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsran_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsran.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsran.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsran.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vsran_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsran_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsran.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsran.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsran.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vsran_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsran_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsran.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsran.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srani.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srani.ll
new file mode 100644
index 00000000000000..717c641616c8d5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srani.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrani.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrani_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrani_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrani.b.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrani.b.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrani.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrani_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrani_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrani.h.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrani.h.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrani.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrani_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrani_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrani.w.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrani.w.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrani.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrani_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrani_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrani.d.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrani.d.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srar.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srar.ll
new file mode 100644
index 00000000000000..8b52b7ac9631f4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srar.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrar.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsrar_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrar_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrar.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrar.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrar.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsrar_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrar_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrar.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrar.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrar.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsrar_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrar_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrar.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrar.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrar.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsrar_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrar_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrar.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrar.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrari.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrari_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsrari_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrari.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrari.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrari.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrari_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsrari_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrari.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrari.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrari.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrari_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsrari_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrari.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrari.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrari.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrari_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsrari_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrari.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrari.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarn.ll
new file mode 100644
index 00000000000000..d4cdfb5359eaae
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarn.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrarn.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vsrarn_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarn.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrarn.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrarn.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vsrarn_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarn.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrarn.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrarn.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vsrarn_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarn.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrarn.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarni.ll
new file mode 100644
index 00000000000000..2253e88372fcbd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srarni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrarni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrarni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarni.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrarni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrarni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrarni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarni.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrarni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrarni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrarni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarni.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrarni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrarni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrarni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrarni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrarni.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrarni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srl.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srl.ll
new file mode 100644
index 00000000000000..1cddd9622233a1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srl.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrl.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsrl_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrl.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrl.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrl.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsrl_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrl.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrl.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrl.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsrl_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrl.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrl.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrl.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsrl_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrl.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrl.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrli.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrli_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsrli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 7
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrli.b(<16 x i8> %va, i32 7)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrli.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrli_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsrli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrli.h(<8 x i16> %va, i32 15)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrli.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrli_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsrli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrli.w(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrli.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrli_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsrli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrli.d $vr0, $vr0, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrli.d(<2 x i64> %va, i32 63)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srln.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srln.ll
new file mode 100644
index 00000000000000..1c9b23243ffbdb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srln.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrln.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vsrln_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrln_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrln.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrln.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrln.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vsrln_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrln_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrln.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrln.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrln.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vsrln_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrln_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrln.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrln.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlni.ll
new file mode 100644
index 00000000000000..6e523efa182405
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrlni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrlni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlni.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrlni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrlni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlni.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrlni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrlni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlni.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrlni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrlni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlni.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlr.ll
new file mode 100644
index 00000000000000..51638fa1a47f41
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsrlr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlr.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsrlr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlr.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsrlr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlr.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsrlr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlr.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrlri_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsrlri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlri.b $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8> %va, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrlri_h(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsrlri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlri.h $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16> %va, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrlri_w(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsrlri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlri.w $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32> %va, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrlri_d(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsrlri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlri.d $vr0, $vr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64> %va, i32 1)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrn.ll
new file mode 100644
index 00000000000000..893e5139624112
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrn.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrlrn.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vsrlrn_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrn.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlrn.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrlrn.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vsrlrn_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrn.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlrn.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrlrn.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vsrlrn_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrn.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlrn.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrni.ll
new file mode 100644
index 00000000000000..d1ea450d2237de
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-srlrni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsrlrni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vsrlrni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrni.b.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlrni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsrlrni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vsrlrni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrni.h.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlrni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsrlrni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vsrlrni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrni.w.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlrni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsrlrni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vsrlrni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsrlrni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrlrni.d.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlrni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssran.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssran.ll
new file mode 100644
index 00000000000000..cecccbb730c950
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssran.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssran.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssran_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssran.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssran.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssran_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssran.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssran.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssran_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssran.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssran.bu.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssran_bu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.bu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssran.bu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssran.hu.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssran_hu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.hu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssran.hu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssran.wu.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssran_wu_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssran_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssran.wu.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssran.wu.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrani.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrani.ll
new file mode 100644
index 00000000000000..57b8eb16986660
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrani.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrani_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrani.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrani_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrani.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrani_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrani.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrani_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrani.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrani_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.bu.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrani.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrani_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.hu.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrani.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrani_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.wu.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrani.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrani_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrani_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrani.du.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrani.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarn.ll
new file mode 100644
index 00000000000000..c6b7d9ec8e1d60
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarn.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrarn.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrarn_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrarn.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrarn.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrarn_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrarn.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrarn.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrarn_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrarn.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrarn.bu.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrarn_bu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.bu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrarn.bu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrarn.hu.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrarn_hu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.hu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrarn.hu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrarn.wu.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrarn_wu_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarn_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarn.wu.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrarn.wu.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarni.ll
new file mode 100644
index 00000000000000..1a2e91962ac3b6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrarni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrarni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrarni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrarni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrarni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrarni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrarni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrarni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrarni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrarni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrarni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrarni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrarni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrarni.bu.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrarni_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.bu.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrarni.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrarni.hu.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrarni_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.hu.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrarni.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrarni.wu.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrarni_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.wu.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrarni.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrarni.du.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrarni_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrarni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrarni.du.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrarni.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrln.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrln.ll
new file mode 100644
index 00000000000000..697ccc3962a81b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrln.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrln.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrln_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrln.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrln.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrln_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrln.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrln.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrln_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrln.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrln.bu.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrln_bu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.bu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrln.bu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrln.hu.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrln_hu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.hu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrln.hu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrln.wu.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrln_wu_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrln_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrln.wu.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrln.wu.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlni.ll
new file mode 100644
index 00000000000000..8dd41e7abe8739
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrlni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrlni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrlni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrlni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrlni_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.bu.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrlni_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.hu.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrlni_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.wu.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrlni_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlni.du.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrn.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrn.ll
new file mode 100644
index 00000000000000..a8e76cbaa7fd12
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrn.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlrn.b.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrlrn_b_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.b.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlrn.b.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlrn.h.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrlrn_h_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.h.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlrn.h.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlrn.w.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrlrn_w_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.w.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlrn.w.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlrn.bu.h(<8 x i16>, <8 x i16>)
+
+define <16 x i8> @lsx_vssrlrn_bu_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.bu.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlrn.bu.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlrn.hu.w(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @lsx_vssrlrn_hu_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.hu.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlrn.hu.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlrn.wu.d(<2 x i64>, <2 x i64>)
+
+define <4 x i32> @lsx_vssrlrn_wu_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrn_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrn.wu.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlrn.wu.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <4 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrni.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrni.ll
new file mode 100644
index 00000000000000..869e81b2b09d65
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssrlrni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlrni.b.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrlrni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.b.h $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlrni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlrni.h.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrlrni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.h.w $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlrni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlrni.w.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrlrni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.w.d $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlrni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrlrni.d.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrlrni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.d.q $vr0, $vr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrlrni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssrlrni.bu.h(<16 x i8>, <16 x i8>, i32)
+
+define <16 x i8> @lsx_vssrlrni_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.bu.h $vr0, $vr1, 15
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssrlrni.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssrlrni.hu.w(<8 x i16>, <8 x i16>, i32)
+
+define <8 x i16> @lsx_vssrlrni_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.hu.w $vr0, $vr1, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssrlrni.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssrlrni.wu.d(<4 x i32>, <4 x i32>, i32)
+
+define <4 x i32> @lsx_vssrlrni_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.wu.d $vr0, $vr1, 63
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssrlrni.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssrlrni.du.q(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @lsx_vssrlrni_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssrlrni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssrlrni.du.q $vr0, $vr1, 127
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssrlrni.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssub.ll
new file mode 100644
index 00000000000000..c594b426d65031
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-ssub.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vssub.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vssub_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssub.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssub.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vssub_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssub.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssub.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vssub_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssub.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssub.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vssub_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssub.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <16 x i8> @llvm.loongarch.lsx.vssub.bu(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vssub_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vssub.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vssub.hu(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vssub_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vssub.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vssub.wu(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vssub_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vssub.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vssub.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vssub_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vssub_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vssub.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vssub.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
new file mode 100644
index 00000000000000..798f509f2318e9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-st.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare void @llvm.loongarch.lsx.vst(<16 x i8>, i8*, i32)
+
+define void @lsx_vst(<16 x i8> %va, i8* %p) nounwind {
+; CHECK-LABEL: lsx_vst:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vst $vr0, $a0, -2048
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vst(<16 x i8> %va, i8* %p, i32 -2048)
+  ret void
+}
+
+declare void @llvm.loongarch.lsx.vstx(<16 x i8>, i8*, i64)
+
+define void @lsx_vstx(<16 x i8> %va, i8* %p, i64 %c) nounwind {
+; CHECK-LABEL: lsx_vstx:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vstx $vr0, $a0, $a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vstx(<16 x i8> %va, i8* %p, i64 %c)
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
new file mode 100644
index 00000000000000..6b9e7a9d7462e2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-stelm.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare void @llvm.loongarch.lsx.vstelm.b(<16 x i8>, i8*, i32, i32)
+
+define void @lsx_vstelm_b(<16 x i8> %va, i8* %p) nounwind {
+; CHECK-LABEL: lsx_vstelm_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vstelm.b $vr0, $a0, 1, 15
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vstelm.b(<16 x i8> %va, i8* %p, i32 1, i32 15)
+  ret void
+}
+
+declare void @llvm.loongarch.lsx.vstelm.h(<8 x i16>, i8*, i32, i32)
+
+define void @lsx_vstelm_h(<8 x i16> %va, i8* %p) nounwind {
+; CHECK-LABEL: lsx_vstelm_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vstelm.h $vr0, $a0, 2, 7
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vstelm.h(<8 x i16> %va, i8* %p, i32 2, i32 7)
+  ret void
+}
+
+declare void @llvm.loongarch.lsx.vstelm.w(<4 x i32>, i8*, i32, i32)
+
+define void @lsx_vstelm_w(<4 x i32> %va, i8* %p) nounwind {
+; CHECK-LABEL: lsx_vstelm_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vstelm.w $vr0, $a0, 4, 3
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vstelm.w(<4 x i32> %va, i8* %p, i32 4, i32 3)
+  ret void
+}
+
+declare void @llvm.loongarch.lsx.vstelm.d(<2 x i64>, i8*, i32, i32)
+
+define void @lsx_vstelm_d(<2 x i64> %va, i8* %p) nounwind {
+; CHECK-LABEL: lsx_vstelm_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vstelm.d $vr0, $a0, 8, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lsx.vstelm.d(<2 x i64> %va, i8* %p, i32 8, i32 1)
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sub.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sub.ll
new file mode 100644
index 00000000000000..5c04a3d8de0df6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-sub.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsub.b(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vsub_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsub.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsub.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsub.h(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @lsx_vsub_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsub.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsub.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsub.w(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @lsx_vsub_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsub.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsub.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsub.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsub_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsub.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsub.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsub.q(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsub_q(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsub_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsub.q $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsub.q(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subi.ll
new file mode 100644
index 00000000000000..304a4e4a78cc7c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subi.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vsubi.bu(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vsubi_bu(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vsubi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubi.bu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vsubi.bu(<16 x i8> %va, i32 31)
+  ret <16 x i8> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsubi.hu(<8 x i16>, i32)
+
+define <8 x i16> @lsx_vsubi_hu(<8 x i16> %va) nounwind {
+; CHECK-LABEL: lsx_vsubi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubi.hu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsubi.hu(<8 x i16> %va, i32 31)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsubi.wu(<4 x i32>, i32)
+
+define <4 x i32> @lsx_vsubi_wu(<4 x i32> %va) nounwind {
+; CHECK-LABEL: lsx_vsubi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsubi.wu(<4 x i32> %va, i32 31)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubi.du(<2 x i64>, i32)
+
+define <2 x i64> @lsx_vsubi_du(<2 x i64> %va) nounwind {
+; CHECK-LABEL: lsx_vsubi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubi.du $vr0, $vr0, 31
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubi.du(<2 x i64> %va, i32 31)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subw.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subw.ll
new file mode 100644
index 00000000000000..48100db743344e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-subw.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <8 x i16> @llvm.loongarch.lsx.vsubwev.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vsubwev_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsubwev.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vsubwev_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwev.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vsubwev_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwev.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsubwev_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsubwev.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vsubwev_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsubwev.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsubwev.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vsubwev_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsubwev.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwev.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vsubwev_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwev.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwev.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsubwev_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwev.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwev.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsubwod.h.b(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vsubwod_h_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.h.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.b(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsubwod.w.h(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vsubwod_w_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.w.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.h(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwod.d.w(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vsubwod_d_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.d.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.w(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwod.q.d(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsubwod_q_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.q.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.d(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}
+
+declare <8 x i16> @llvm.loongarch.lsx.vsubwod.h.bu(<16 x i8>, <16 x i8>)
+
+define <8 x i16> @lsx_vsubwod_h_bu(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.h.bu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i16> @llvm.loongarch.lsx.vsubwod.h.bu(<16 x i8> %va, <16 x i8> %vb)
+  ret <8 x i16> %res
+}
+
+declare <4 x i32> @llvm.loongarch.lsx.vsubwod.w.hu(<8 x i16>, <8 x i16>)
+
+define <4 x i32> @lsx_vsubwod_w_hu(<8 x i16> %va, <8 x i16> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.w.hu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i32> @llvm.loongarch.lsx.vsubwod.w.hu(<8 x i16> %va, <8 x i16> %vb)
+  ret <4 x i32> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwod.d.wu(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @lsx_vsubwod_d_wu(<4 x i32> %va, <4 x i32> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.d.wu $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwod.d.wu(<4 x i32> %va, <4 x i32> %vb)
+  ret <2 x i64> %res
+}
+
+declare <2 x i64> @llvm.loongarch.lsx.vsubwod.q.du(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @lsx_vsubwod_q_du(<2 x i64> %va, <2 x i64> %vb) nounwind {
+; CHECK-LABEL: lsx_vsubwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsubwod.q.du $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <2 x i64> @llvm.loongarch.lsx.vsubwod.q.du(<2 x i64> %va, <2 x i64> %vb)
+  ret <2 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xor.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xor.ll
new file mode 100644
index 00000000000000..72a1fe93c2c013
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xor.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vxor.v(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @lsx_vxor_v(<16 x i8> %va, <16 x i8> %vb) nounwind {
+; CHECK-LABEL: lsx_vxor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vxor.v(<16 x i8> %va, <16 x i8> %vb)
+  ret <16 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xori.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xori.ll
new file mode 100644
index 00000000000000..09669cd5ac14c7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-xori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <16 x i8> @llvm.loongarch.lsx.vxori.b(<16 x i8>, i32)
+
+define <16 x i8> @lsx_vxori_b(<16 x i8> %va) nounwind {
+; CHECK-LABEL: lsx_vxori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vxori.b $vr0, $vr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i8> @llvm.loongarch.lsx.vxori.b(<16 x i8> %va, i32 3)
+  ret <16 x i8> %res
+}
