[llvm] 83311b2 - [LoongArch] Add LASX intrinsic testcases

via llvm-commits <llvm-commits at lists.llvm.org>
Sat Aug 19 02:12:51 PDT 2023


Author: chenli
Date: 2023-08-19T17:12:31+08:00
New Revision: 83311b2b5d1b9869f9a7b265994394ea898448a2

URL: https://github.com/llvm/llvm-project/commit/83311b2b5d1b9869f9a7b265994394ea898448a2
DIFF: https://github.com/llvm/llvm-project/commit/83311b2b5d1b9869f9a7b265994394ea898448a2.diff

LOG: [LoongArch] Add LASX intrinsic testcases

Depends on D155830

Reviewed By: SixWeining

Differential Revision: https://reviews.llvm.org/D155835
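
Each added file follows the same autogenerated pattern: declare a LoongArch
LASX intrinsic, call it from a small wrapper function, and use FileCheck to
verify that llc (run with --mtriple=loongarch64 --mattr=+lasx) selects the
corresponding LASX instruction. As a minimal sketch, the xvadd.b case taken
from intrinsic-add.ll below:

    declare <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8>, <32 x i8>)

    define <32 x i8> @lasx_xvadd_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
    ; CHECK-LABEL: lasx_xvadd_b:
    ; CHECK:       # %bb.0: # %entry
    ; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
    ; CHECK-NEXT:    ret
    entry:
      %res = call <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8> %va, <32 x i8> %vb)
      ret <32 x i8> %res
    }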

Added: 
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-absd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-add.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-adda.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-addi.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-addw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-and.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-andi.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-andn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-avg.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-avgr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitclr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitrev.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitsel.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitseli.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitset.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsll.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsrl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-clo.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-clz.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-div.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ext2xv.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-exth.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-extl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-extrins.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fadd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fclass.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcmp.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvt.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvth.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvtl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fdiv.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ffint.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-flogb.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmadd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmax.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmaxa.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmin.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmina.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmsub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmul.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmadd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmsub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecip.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-frint.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrt.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-frstp.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsqrt.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ftint.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-haddw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-hsubw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ilv.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-insgr2vr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-insve0.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldi.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-madd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-maddw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-max.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-min.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-mod.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskgez.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskltz.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-msknz.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-msub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-muh.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-mul.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-mulw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-neg.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-nor.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-nori.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-or.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ori.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-orn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-pack.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-pcnt.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-perm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-pick.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl128vei.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve0.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-rotr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sadd.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sat.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-seq.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf4i.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-signcov.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sle.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sll.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sllwil.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-slt.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sra.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sran.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srani.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srar.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srl.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srln.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlr.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssran.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrani.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrln.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrn.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrni.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-sub.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-subi.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-subw.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-xor.ll
    llvm/test/CodeGen/LoongArch/lasx/intrinsic-xori.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-absd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-absd.ll
new file mode 100644
index 00000000000000..bf54f44357b034
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-absd.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvabsd.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvabsd_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvabsd.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvabsd.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvabsd_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvabsd.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvabsd.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvabsd_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvabsd.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvabsd.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvabsd_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvabsd.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvabsd.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvabsd_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvabsd.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvabsd.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvabsd_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvabsd.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvabsd.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvabsd_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvabsd.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvabsd.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvabsd_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvabsd_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvabsd.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvabsd.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-add.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-add.ll
new file mode 100644
index 00000000000000..0c2f2ace29fc93
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-add.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvadd_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvadd.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvadd.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvadd_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvadd.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvadd.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvadd_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvadd.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvadd.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvadd_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvadd.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvadd.q(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvadd_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadd_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadd.q $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvadd.q(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-adda.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-adda.ll
new file mode 100644
index 00000000000000..c1258d53e913ee
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-adda.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvadda.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvadda_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadda_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadda.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvadda.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvadda.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvadda_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadda_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadda.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvadda.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvadda.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvadda_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadda_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadda.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvadda.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvadda.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvadda_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvadda_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvadda.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvadda.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addi.ll
new file mode 100644
index 00000000000000..09b5d07a0151cd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addi.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvaddi.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvaddi_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvaddi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddi.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvaddi.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddi.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvaddi_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvaddi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddi.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddi.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddi.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvaddi_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvaddi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddi.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddi.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddi.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvaddi_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvaddi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddi.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddi.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addw.ll
new file mode 100644
index 00000000000000..ef7a1b5a50efb1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-addw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwev_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwev_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwev_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwev_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwev_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwev_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwev_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwev_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwev_h_bu_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.h.bu.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwev.h.bu.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwev_w_hu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.w.hu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwev.w.hu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwev_d_wu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.d.wu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.d.wu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwev_q_du_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwev.q.du.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwev.q.du.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwod_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwod_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwod_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwod_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwod_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwod_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwod_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwod_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvaddwod_h_bu_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.h.bu.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvaddwod.h.bu.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvaddwod_w_hu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.w.hu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvaddwod.w.hu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvaddwod_d_wu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.d.wu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.d.wu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvaddwod_q_du_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvaddwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvaddwod.q.du.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvaddwod.q.du.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-and.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-and.ll
new file mode 100644
index 00000000000000..15f3a8094770b0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-and.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvand.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvand_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvand_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvand.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andi.ll
new file mode 100644
index 00000000000000..88cf142d696823
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andi.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvandi.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvandi_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvandi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvandi.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvandi.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andn.ll
new file mode 100644
index 00000000000000..f385ef3661cb9d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-andn.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvandn.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvandn_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvandn_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvandn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvandn.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avg.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avg.ll
new file mode 100644
index 00000000000000..488d3b96b00384
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avg.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvavg.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvavg_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvavg.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvavg.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvavg_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvavg.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvavg.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvavg_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvavg.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvavg.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvavg_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvavg.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvavg.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvavg_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvavg.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvavg.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvavg_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvavg.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvavg.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvavg_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvavg.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvavg.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvavg_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavg_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavg.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvavg.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avgr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avgr.ll
new file mode 100644
index 00000000000000..b5ab5a5366aafe
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-avgr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvavgr.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvavgr_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvavgr.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvavgr.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvavgr_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvavgr.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvavgr.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvavgr_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvavgr.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvavgr.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvavgr_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvavgr.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvavgr.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvavgr_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvavgr.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvavgr.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvavgr_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvavgr.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvavgr.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvavgr_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvavgr.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvavgr.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvavgr_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvavgr_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvavgr.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvavgr.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitclr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitclr.ll
new file mode 100644
index 00000000000000..cec71bab2fe84c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitclr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitclr.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvbitclr_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitclr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclr.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitclr.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitclr.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvbitclr_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitclr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclr.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitclr.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitclr.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvbitclr_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitclr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclr.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitclr.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitclr.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvbitclr_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitclr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclr.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitclr.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitclri.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbitclri_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitclri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclri.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitclri.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitclri.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvbitclri_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitclri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclri.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitclri.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitclri.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvbitclri_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitclri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclri.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitclri.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitclri.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvbitclri_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitclri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitclri.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitclri.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitrev.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitrev.ll
new file mode 100644
index 00000000000000..fb4f9fbc2e4b39
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitrev.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitrev.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvbitrev_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitrev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrev.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitrev.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitrev.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvbitrev_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitrev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrev.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitrev.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitrev.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvbitrev_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitrev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrev.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitrev.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitrev.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvbitrev_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitrev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrev.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitrev.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitrevi.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbitrevi_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitrevi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrevi.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitrevi.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitrevi.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvbitrevi_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitrevi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrevi.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitrevi.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitrevi.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvbitrevi_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitrevi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrevi.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitrevi.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitrevi.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvbitrevi_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitrevi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitrevi.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitrevi.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitsel.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitsel.ll
new file mode 100644
index 00000000000000..2e91407590ac16
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitsel.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitsel.v(<32 x i8>, <32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvbitsel_v(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvbitsel_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitsel.v $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitsel.v(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitseli.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitseli.ll
new file mode 100644
index 00000000000000..79dd55cbfef988
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitseli.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitseli.b(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbitseli_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitseli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitseli.b $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitseli.b(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitset.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitset.ll
new file mode 100644
index 00000000000000..83d1f0ef60c63c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bitset.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitset.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvbitset_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitset_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitset.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitset.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitset.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvbitset_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitset_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitset.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitset.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitset.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvbitset_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitset_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitset.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitset.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitset.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvbitset_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvbitset_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitset.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitset.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbitseti.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbitseti_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitseti_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitseti.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbitseti.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvbitseti.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvbitseti_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitseti_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitseti.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvbitseti.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvbitseti.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvbitseti_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitseti_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitseti.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvbitseti.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvbitseti.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvbitseti_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvbitseti_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbitseti.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvbitseti.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsll.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsll.ll
new file mode 100644
index 00000000000000..cbb63ced5cc00d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsll.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbsll.v(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbsll_v(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvbsll_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbsll.v $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbsll.v(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsrl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsrl.ll
new file mode 100644
index 00000000000000..b0c26cbe3e35c3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-bsrl.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvbsrl.v(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvbsrl_v(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvbsrl_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbsrl.v $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvbsrl.v(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clo.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clo.ll
new file mode 100644
index 00000000000000..29b2be03d54eca
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clo.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvclo.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvclo_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvclo_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclo.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvclo.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvclo.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvclo_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvclo_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclo.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvclo.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvclo.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvclo_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvclo_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclo.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvclo.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvclo.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvclo_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvclo_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclo.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvclo.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clz.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clz.ll
new file mode 100644
index 00000000000000..5247ceedbd146e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-clz.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvclz.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvclz_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvclz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclz.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvclz.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvclz.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvclz_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvclz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclz.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvclz.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvclz.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvclz_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvclz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclz.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvclz.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvclz.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvclz_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvclz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvclz.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvclz.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-div.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-div.ll
new file mode 100644
index 00000000000000..813204092e944a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-div.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvdiv.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvdiv_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvdiv.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvdiv.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvdiv_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvdiv.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvdiv.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvdiv_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvdiv.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvdiv.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvdiv_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvdiv.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvdiv.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvdiv_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvdiv.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvdiv.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvdiv_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvdiv.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvdiv.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvdiv_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvdiv.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvdiv.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvdiv_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvdiv_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvdiv.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvdiv.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ext2xv.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ext2xv.ll
new file mode 100644
index 00000000000000..48721b52af0091
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ext2xv.ll
@@ -0,0 +1,146 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.vext2xv.h.b(<32 x i8>)
+
+define <16 x i16> @lasx_vext2xv_h_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.h.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.vext2xv.h.b(<32 x i8> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.vext2xv.w.b(<32 x i8>)
+
+define <8 x i32> @lasx_vext2xv_w_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_w_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.w.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.vext2xv.w.b(<32 x i8> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.d.b(<32 x i8>)
+
+define <4 x i64> @lasx_vext2xv_d_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_d_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.d.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.b(<32 x i8> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.vext2xv.w.h(<16 x i16>)
+
+define <8 x i32> @lasx_vext2xv_w_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.w.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.vext2xv.w.h(<16 x i16> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.d.h(<16 x i16>)
+
+define <4 x i64> @lasx_vext2xv_d_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_d_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.d.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.h(<16 x i16> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.d.w(<8 x i32>)
+
+define <4 x i64> @lasx_vext2xv_d_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.d.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.d.w(<8 x i32> %va)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.vext2xv.hu.bu(<32 x i8>)
+
+define <16 x i16> @lasx_vext2xv_hu_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.hu.bu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.vext2xv.hu.bu(<32 x i8> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.bu(<32 x i8>)
+
+define <8 x i32> @lasx_vext2xv_wu_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_wu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.wu.bu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.bu(<32 x i8> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.du.bu(<32 x i8>)
+
+define <4 x i64> @lasx_vext2xv_du_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_du_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.du.bu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.bu(<32 x i8> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.hu(<16 x i16>)
+
+define <8 x i32> @lasx_vext2xv_wu_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.wu.hu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.vext2xv.wu.hu(<16 x i16> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.du.hu(<16 x i16>)
+
+define <4 x i64> @lasx_vext2xv_du_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_du_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.du.hu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.hu(<16 x i16> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.vext2xv.du.wu(<8 x i32>)
+
+define <4 x i64> @lasx_vext2xv_du_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_vext2xv_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vext2xv.du.wu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.vext2xv.du.wu(<8 x i32> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-exth.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-exth.ll
new file mode 100644
index 00000000000000..543589e61b12f7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-exth.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvexth.h.b(<32 x i8>)
+
+define <16 x i16> @lasx_xvexth_h_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.h.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvexth.h.b(<32 x i8> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvexth.w.h(<16 x i16>)
+
+define <8 x i32> @lasx_xvexth_w_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.w.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvexth.w.h(<16 x i16> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvexth.d.w(<8 x i32>)
+
+define <4 x i64> @lasx_xvexth_d_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.d.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvexth.d.w(<8 x i32> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvexth.q.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvexth_q_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.q.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvexth.q.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvexth.hu.bu(<32 x i8>)
+
+define <16 x i16> @lasx_xvexth_hu_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.hu.bu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvexth.hu.bu(<32 x i8> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvexth.wu.hu(<16 x i16>)
+
+define <8 x i32> @lasx_xvexth_wu_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.wu.hu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvexth.wu.hu(<16 x i16> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvexth.du.wu(<8 x i32>)
+
+define <4 x i64> @lasx_xvexth_du_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.du.wu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvexth.du.wu(<8 x i32> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvexth.qu.du(<4 x i64>)
+
+define <4 x i64> @lasx_xvexth_qu_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvexth_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvexth.qu.du $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvexth.qu.du(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extl.ll
new file mode 100644
index 00000000000000..7040c8c784cdfe
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extl.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <4 x i64> @llvm.loongarch.lasx.xvextl.q.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvextl_q_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvextl_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextl.q.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvextl.q.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvextl.qu.du(<4 x i64>)
+
+define <4 x i64> @lasx_xvextl_qu_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvextl_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextl.qu.du $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvextl.qu.du(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extrins.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extrins.ll
new file mode 100644
index 00000000000000..c8774a7b29c0b5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-extrins.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvextrins.b(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvextrins_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvextrins_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextrins.b $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvextrins.b(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvextrins.h(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvextrins_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvextrins_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextrins.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvextrins.h(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvextrins.w(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvextrins_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvextrins_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextrins.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvextrins.w(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvextrins.d(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvextrins_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvextrins_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvextrins.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvextrins.d(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fadd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fadd.ll
new file mode 100644
index 00000000000000..563a0ce9e384d5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfadd.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfadd_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfadd.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfadd.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfadd.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfadd_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfadd.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fclass.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fclass.ll
new file mode 100644
index 00000000000000..901ca5bb026019
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fclass.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfclass.s(<8 x float>)
+
+define <8 x i32> @lasx_xvfclass_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfclass_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfclass.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfclass.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfclass.d(<4 x double>)
+
+define <4 x i64> @lasx_xvfclass_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfclass_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfclass.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfclass.d(<4 x double> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcmp.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcmp.ll
new file mode 100644
index 00000000000000..b01f908e71af5b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcmp.ll
@@ -0,0 +1,530 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.caf.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_caf_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_caf_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.caf.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.caf.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.caf.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_caf_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_caf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.caf.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.caf.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cun.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cun_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cun_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cun.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cun.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cun.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cun_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cun_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cun.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cun.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.ceq.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_ceq_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_ceq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.ceq.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.ceq.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.ceq.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_ceq_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_ceq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.ceq.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.ceq.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cueq.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cueq_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cueq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cueq.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cueq.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cueq.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cueq_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cueq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cueq.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cueq.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.clt.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_clt_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_clt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.clt.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.clt.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_clt_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_clt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.clt.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cult.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cult_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cult_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cult.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cult.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cult.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cult_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cult_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cult.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cult.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cle.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cle_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cle_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cle.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cle.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cle_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cle.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cule.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cule_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cule_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cule.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cule.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cule.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cule_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cule_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cule.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cule.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cne.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cne_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cne.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cne.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cne.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cne_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cne.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cne.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cor.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cor_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cor_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cor.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cor.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cor.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cor_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cor_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cor.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cor.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.cune.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_cune_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cune_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cune.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.cune.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.cune.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_cune_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_cune_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.cune.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.cune.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.saf.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_saf_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_saf_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.saf.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.saf.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.saf.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_saf_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_saf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.saf.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.saf.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sun.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sun_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sun_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sun.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sun.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sun.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sun_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sun_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sun.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sun.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.seq.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_seq_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_seq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.seq.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.seq.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.seq.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_seq_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_seq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.seq.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.seq.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sueq.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sueq_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sueq_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sueq.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sueq.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sueq.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sueq_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sueq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sueq.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sueq.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.slt.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_slt_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_slt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.slt.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.slt.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.slt.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_slt_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_slt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.slt.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.slt.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sult.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sult_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sult_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sult.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sult.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sult.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sult_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sult_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sult.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sult.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sle.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sle_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sle_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sle.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sle.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sle.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sle_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sle.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sle.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sule.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sule_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sule_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sule.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sule.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sule.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sule_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sule_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sule.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sule.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sne.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sne_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sne.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sne.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sne.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sne_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sne.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sne.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sor.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sor_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sor_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sor.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sor.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sor.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sor_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sor_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sor.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sor.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvfcmp.sune.s(<8 x float>, <8 x float>)
+
+define <8 x i32> @lasx_xvfcmp_sune_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sune_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sune.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvfcmp.sune.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvfcmp.sune.d(<4 x double>, <4 x double>)
+
+define <4 x i64> @lasx_xvfcmp_sune_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcmp_sune_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcmp.sune.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvfcmp.sune.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvt.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvt.ll
new file mode 100644
index 00000000000000..82bf1d3df72c6c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvfcvt.h.s(<8 x float>, <8 x float>)
+
+define <16 x i16> @lasx_xvfcvt_h_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcvt_h_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvt.h.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvfcvt.h.s(<8 x float> %va, <8 x float> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvfcvt.s.d(<4 x double>, <4 x double>)
+
+define <8 x float> @lasx_xvfcvt_s_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfcvt_s_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvt.s.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfcvt.s.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x float> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvth.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvth.ll
new file mode 100644
index 00000000000000..e1a6a2923e6770
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvth.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfcvth.s.h(<16 x i16>)
+
+define <8 x float> @lasx_xvfcvth_s_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvfcvth_s_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvth.s.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfcvth.s.h(<16 x i16> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfcvth.d.s(<8 x float>)
+
+define <4 x double> @lasx_xvfcvth_d_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfcvth_d_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvth.d.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfcvth.d.s(<8 x float> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvtl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvtl.ll
new file mode 100644
index 00000000000000..0b3e693c7f51de
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fcvtl.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfcvtl.s.h(<16 x i16>)
+
+define <8 x float> @lasx_xvfcvtl_s_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvfcvtl_s_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvtl.s.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfcvtl.s.h(<16 x i16> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfcvtl.d.s(<8 x float>)
+
+define <4 x double> @lasx_xvfcvtl_d_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfcvtl_d_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfcvtl.d.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfcvtl.d.s(<8 x float> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fdiv.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fdiv.ll
new file mode 100644
index 00000000000000..49923ddd4e8dec
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fdiv.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfdiv.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfdiv_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfdiv_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfdiv.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfdiv.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfdiv.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfdiv_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfdiv_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfdiv.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfdiv.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ffint.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ffint.ll
new file mode 100644
index 00000000000000..24da0bd3383877
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ffint.ll
@@ -0,0 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvffint.s.w(<8 x i32>)
+
+define <8 x float> @lasx_xvffint_s_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvffint_s_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffint.s.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvffint.s.w(<8 x i32> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvffint.d.l(<4 x i64>)
+
+define <4 x double> @lasx_xvffint_d_l(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvffint_d_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffint.d.l $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvffint.d.l(<4 x i64> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvffint.s.wu(<8 x i32>)
+
+define <8 x float> @lasx_xvffint_s_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvffint_s_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffint.s.wu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvffint.s.wu(<8 x i32> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvffint.d.lu(<4 x i64>)
+
+define <4 x double> @lasx_xvffint_d_lu(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvffint_d_lu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffint.d.lu $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvffint.d.lu(<4 x i64> %va)
+  ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvffintl.d.w(<8 x i32>)
+
+define <4 x double> @lasx_xvffintl_d_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvffintl_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffintl.d.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvffintl.d.w(<8 x i32> %va)
+  ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvffinth.d.w(<8 x i32>)
+
+define <4 x double> @lasx_xvffinth_d_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvffinth_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffinth.d.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvffinth.d.w(<8 x i32> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvffint.s.l(<4 x i64>, <4 x i64>)
+
+define <8 x float> @lasx_xvffint_s_l(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvffint_s_l:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvffint.s.l $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvffint.s.l(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x float> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-flogb.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-flogb.ll
new file mode 100644
index 00000000000000..bccef4504d70e2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-flogb.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvflogb.s(<8 x float>)
+
+define <8 x float> @lasx_xvflogb_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvflogb_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvflogb.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvflogb.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvflogb.d(<4 x double>)
+
+define <4 x double> @lasx_xvflogb_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvflogb_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvflogb.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvflogb.d(<4 x double> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmadd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmadd.ll
new file mode 100644
index 00000000000000..0fc06f97166028
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmadd.s(<8 x float>, <8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmadd_s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfmadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmadd.s $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmadd.s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmadd.d(<4 x double>, <4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmadd_d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmadd.d $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmadd.d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmax.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmax.ll
new file mode 100644
index 00000000000000..2422fa0c00d8bf
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmax.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmax.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmax_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmax_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmax.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmax.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmax.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmax_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmax_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmax.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmaxa.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmaxa.ll
new file mode 100644
index 00000000000000..cd9ccc656aef66
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmaxa.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmaxa.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmaxa_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmaxa_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmaxa.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmaxa.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmaxa.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmaxa_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmaxa_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmaxa.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmaxa.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmin.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmin.ll
new file mode 100644
index 00000000000000..effb3f9e1d75a8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmin.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmin.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmin_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmin_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmin.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmin.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmin.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmin_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmin_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmin.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmin.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmina.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmina.ll
new file mode 100644
index 00000000000000..753a6f31ba061c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmina.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmina.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmina_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmina_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmina.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmina.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmina.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmina_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmina_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmina.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmina.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmsub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmsub.ll
new file mode 100644
index 00000000000000..57909d0dd1689f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmsub.s(<8 x float>, <8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmsub_s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfmsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmsub.s $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmsub.s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmsub.d(<4 x double>, <4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmsub_d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmsub.d $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmsub.d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmul.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmul.ll
new file mode 100644
index 00000000000000..9cad6f38306618
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fmul.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfmul.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfmul_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmul_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmul.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfmul.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfmul.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfmul_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfmul_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfmul.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfmul.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmadd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmadd.ll
new file mode 100644
index 00000000000000..c30993590f98a8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmadd.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfnmadd.s(<8 x float>, <8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfnmadd_s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfnmadd_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfnmadd.s $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfnmadd.s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfnmadd.d(<4 x double>, <4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfnmadd_d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfnmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfnmadd.d $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfnmadd.d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmsub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmsub.ll
new file mode 100644
index 00000000000000..2e7ca695be6256
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fnmsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfnmsub.s(<8 x float>, <8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfnmsub_s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfnmsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfnmsub.s $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfnmsub.s(<8 x float> %va, <8 x float> %vb, <8 x float> %vc)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfnmsub.d(<4 x double>, <4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfnmsub_d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfnmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfnmsub.d $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfnmsub.d(<4 x double> %va, <4 x double> %vb, <4 x double> %vc)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecip.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecip.ll
new file mode 100644
index 00000000000000..da3a26df2824e8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecip.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrecip.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrecip_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrecip_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrecip.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrecip.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrecip.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrecip_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrecip_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrecip.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrecip.d(<4 x double> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frint.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frint.ll
new file mode 100644
index 00000000000000..ddead27cd14b5b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frint.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrintrne.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrintrne_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrne_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrne.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrintrne.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrintrne.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrintrne_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrne_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrne.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrintrne.d(<4 x double> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrintrz.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrintrz_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrz_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrz.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrintrz.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrintrz.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrintrz_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrz.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrintrz.d(<4 x double> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrintrp.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrintrp_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrp_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrp.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrintrp.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrintrp.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrintrp_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrp_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrp.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrintrp.d(<4 x double> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrintrm.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrintrm_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrm_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrm.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrintrm.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrintrm.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrintrm_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrintrm_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrintrm.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrintrm.d(<4 x double> %va)
+  ret <4 x double> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrint.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrint_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrint_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrint.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrint.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrint.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrint_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrint_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrint.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrint.d(<4 x double> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrt.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrt.ll
new file mode 100644
index 00000000000000..6efa8122baf180
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfrsqrt.s(<8 x float>)
+
+define <8 x float> @lasx_xvfrsqrt_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrsqrt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrsqrt.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfrsqrt.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfrsqrt.d(<4 x double>)
+
+define <4 x double> @lasx_xvfrsqrt_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfrsqrt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrsqrt.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfrsqrt.d(<4 x double> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frstp.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frstp.ll
new file mode 100644
index 00000000000000..e83e55a52a113d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frstp.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvfrstp.b(<32 x i8>, <32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvfrstp_b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfrstp_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrstp.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvfrstp.b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvfrstp.h(<16 x i16>, <16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvfrstp_h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvfrstp_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrstp.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvfrstp.h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <16 x i16> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvfrstpi.b(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvfrstpi_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfrstpi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrstpi.b $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvfrstpi.b(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvfrstpi.h(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvfrstpi_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfrstpi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfrstpi.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvfrstpi.h(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsqrt.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsqrt.ll
new file mode 100644
index 00000000000000..a13333d8d81c29
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsqrt.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfsqrt.s(<8 x float>)
+
+define <8 x float> @lasx_xvfsqrt_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvfsqrt_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfsqrt.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfsqrt.s(<8 x float> %va)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfsqrt.d(<4 x double>)
+
+define <4 x double> @lasx_xvfsqrt_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvfsqrt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfsqrt.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfsqrt.d(<4 x double> %va)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsub.ll
new file mode 100644
index 00000000000000..b52774a03618dd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fsub.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.loongarch.lasx.xvfsub.s(<8 x float>, <8 x float>)
+
+define <8 x float> @lasx_xvfsub_s(<8 x float> %va, <8 x float> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfsub_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfsub.s $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvfsub.s(<8 x float> %va, <8 x float> %vb)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvfsub.d(<4 x double>, <4 x double>)
+
+define <4 x double> @lasx_xvfsub_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvfsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvfsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvfsub.d(<4 x double> %va, <4 x double> %vb)
+  ret <4 x double> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ftint.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ftint.ll
new file mode 100644
index 00000000000000..74cd507f16d263
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ftint.ll
@@ -0,0 +1,350 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftintrne_w_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrne_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrne.w.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrne.l.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftintrne_l_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrne_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrne.l.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrne.l.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftintrz_w_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrz_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrz.w.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrz.l.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftintrz_l_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrz_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrz.l.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrz.l.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftintrp_w_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrp_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrp.w.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrp.l.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftintrp_l_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrp_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrp.l.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrp.l.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftintrm_w_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrm_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrm.w.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrm.l.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftintrm_l_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrm_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrm.l.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrm.l.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftint.w.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftint_w_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftint_w_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftint.w.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftint.w.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftint.l.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftint_l_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftint_l_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftint.l.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftint.l.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrz.wu.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftintrz_wu_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrz_wu_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrz.wu.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrz.wu.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrz.lu.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftintrz_lu_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrz_lu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrz.lu.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrz.lu.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftint.wu.s(<8 x float>)
+
+define <8 x i32> @lasx_xvftint_wu_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftint_wu_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftint.wu.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftint.wu.s(<8 x float> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftint.lu.d(<4 x double>)
+
+define <4 x i64> @lasx_xvftint_lu_d(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvftint_lu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftint.lu.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftint.lu.d(<4 x double> %va)
+  ret <4 x i64> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.d(<4 x double>, <4 x double>)
+
+define <8 x i32> @lasx_xvftintrne_w_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvftintrne_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrne.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrne.w.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.d(<4 x double>, <4 x double>)
+
+define <8 x i32> @lasx_xvftintrz_w_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvftintrz_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrz.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrz.w.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.d(<4 x double>, <4 x double>)
+
+define <8 x i32> @lasx_xvftintrp_w_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvftintrp_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrp.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrp.w.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.d(<4 x double>, <4 x double>)
+
+define <8 x i32> @lasx_xvftintrm_w_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvftintrm_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrm.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftintrm.w.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvftint.w.d(<4 x double>, <4 x double>)
+
+define <8 x i32> @lasx_xvftint_w_d(<4 x double> %va, <4 x double> %vb) nounwind {
+; CHECK-LABEL: lasx_xvftint_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftint.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvftint.w.d(<4 x double> %va, <4 x double> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrnel.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrnel_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrnel_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrnel.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrnel.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrneh.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrneh_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrneh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrneh.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrneh.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrzl.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrzl_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrzl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrzl.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrzl.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrzh.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrzh_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrzh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrzh.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrzh.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrpl.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrpl_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrpl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrpl.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrpl.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrph.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrph_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrph_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrph.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrph.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrml.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrml_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrml_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrml.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrml.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintrmh.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintrmh_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintrmh_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintrmh.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintrmh.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftintl.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftintl_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftintl_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftintl.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftintl.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvftinth.l.s(<8 x float>)
+
+define <4 x i64> @lasx_xvftinth_l_s(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvftinth_l_s:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvftinth.l.s $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvftinth.l.s(<8 x float> %va)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-haddw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-haddw.ll
new file mode 100644
index 00000000000000..2c64ab23806b5c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-haddw.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvhaddw.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvhaddw_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvhaddw.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvhaddw.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvhaddw_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvhaddw.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhaddw.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvhaddw_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhaddw.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhaddw.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvhaddw_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhaddw.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvhaddw.hu.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvhaddw_hu_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.hu.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvhaddw.hu.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvhaddw.wu.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvhaddw_wu_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.wu.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvhaddw.wu.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhaddw.du.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvhaddw_du_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.du.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhaddw.du.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhaddw.qu.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvhaddw_qu_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhaddw_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhaddw.qu.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhaddw.qu.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-hsubw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-hsubw.ll
new file mode 100644
index 00000000000000..a5223c1d89a044
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-hsubw.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvhsubw.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvhsubw_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvhsubw.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvhsubw.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvhsubw_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvhsubw.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhsubw.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvhsubw_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhsubw.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhsubw.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvhsubw_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhsubw.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvhsubw.hu.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvhsubw_hu_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.hu.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvhsubw.hu.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvhsubw.wu.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvhsubw_wu_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.wu.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvhsubw.wu.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhsubw.du.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvhsubw_du_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.du.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhsubw.du.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvhsubw.qu.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvhsubw_qu_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvhsubw_qu_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvhsubw.qu.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvhsubw.qu.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ilv.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ilv.ll
new file mode 100644
index 00000000000000..c9d0ca6b0324a2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ilv.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvilvl.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvilvl_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvl.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvilvl.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvilvl.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvilvl_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvl.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvilvl.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvilvl.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvilvl_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvl.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvilvl.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvilvl.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvilvl_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvl.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvilvl.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvilvh.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvilvh_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvh_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvh.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvilvh.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvilvh.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvilvh_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvh_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvh.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvilvh.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvilvh.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvilvh_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvh_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvh.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvilvh.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvilvh.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvilvh_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvilvh_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvilvh.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvilvh.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insgr2vr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insgr2vr.ll
new file mode 100644
index 00000000000000..ea98c96464aed8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insgr2vr.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvinsgr2vr.w(<8 x i32>, i32, i32)
+
+define <8 x i32> @lasx_xvinsgr2vr_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvinsgr2vr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    xvinsgr2vr.w $xr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvinsgr2vr.w(<8 x i32> %va, i32 1, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64>, i64, i32)
+
+define <4 x i64> @lasx_xvinsgr2vr_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvinsgr2vr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a0, $zero, 1
+; CHECK-NEXT:    xvinsgr2vr.d $xr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64> %va, i64 1, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insve0.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insve0.ll
new file mode 100644
index 00000000000000..27ae819c4144c5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-insve0.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvinsve0.w(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvinsve0_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvinsve0_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvinsve0.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvinsve0.w(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvinsve0.d(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvinsve0_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvinsve0_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvinsve0.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvinsve0.d(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
new file mode 100644
index 00000000000000..5ffc629db4668b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ld.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvld(i8*, i32)
+
+define <32 x i8> @lasx_xvld(i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvld:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvld(i8* %p, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvldx(i8*, i64)
+
+define <32 x i8> @lasx_xvldx(i8* %p, i64 %b) nounwind {
+; CHECK-LABEL: lasx_xvldx:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldx $xr0, $a0, $a1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldx(i8* %p, i64 %b)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldi.ll
new file mode 100644
index 00000000000000..59f79dd32af367
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldi.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <4 x i64> @llvm.loongarch.lasx.xvldi(i32)
+
+define <4 x i64> @lasx_xvldi() nounwind {
+; CHECK-LABEL: lasx_xvldi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldi $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldi(i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvrepli.b(i32)
+
+define <32 x i8> @lasx_xvrepli_b() nounwind {
+; CHECK-LABEL: lasx_xvrepli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepli.b $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvrepli.b(i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvrepli.h(i32)
+
+define <16 x i16> @lasx_xvrepli_h() nounwind {
+; CHECK-LABEL: lasx_xvrepli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepli.h $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvrepli.h(i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvrepli.w(i32)
+
+define <8 x i32> @lasx_xvrepli_w() nounwind {
+; CHECK-LABEL: lasx_xvrepli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepli.w $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvrepli.w(i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvrepli.d(i32)
+
+define <4 x i64> @lasx_xvrepli_d() nounwind {
+; CHECK-LABEL: lasx_xvrepli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepli.d $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvrepli.d(i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
new file mode 100644
index 00000000000000..ae6abdf81cbc58
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ldrepl.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8*, i32)
+
+define <32 x i8> @lasx_xvldrepl_b(i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvldrepl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldrepl.b $xr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvldrepl.b(i8* %p, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8*, i32)
+
+define <16 x i16> @lasx_xvldrepl_h(i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvldrepl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldrepl.h $xr0, $a0, 2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvldrepl.h(i8* %p, i32 2)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8*, i32)
+
+define <8 x i32> @lasx_xvldrepl_w(i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvldrepl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldrepl.w $xr0, $a0, 4
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvldrepl.w(i8* %p, i32 4)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8*, i32)
+
+define <4 x i64> @lasx_xvldrepl_d(i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvldrepl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvldrepl.d $xr0, $a0, 8
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvldrepl.d(i8* %p, i32 8)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-madd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-madd.ll
new file mode 100644
index 00000000000000..d3b09396727e79
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-madd.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmadd.b(<32 x i8>, <32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmadd_b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmadd.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmadd.b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmadd.h(<16 x i16>, <16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmadd_h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmadd.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmadd.h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmadd.w(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmadd_w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmadd.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmadd.w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmadd.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmadd_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmadd.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmadd.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-maddw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-maddw.ll
new file mode 100644
index 00000000000000..146624a764a22e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-maddw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.b(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwev_h_b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.h.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.h(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwev_w_h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.w.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.w(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwev_d_w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.d.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwev_q_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.q.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwev_h_bu(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.h.bu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwev_w_hu(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.w.hu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwev_d_wu(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.d.wu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwev_q_du(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.q.du $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu.b(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwev_h_bu_b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.h.bu.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwev.h.bu.b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu.h(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwev_w_hu_h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.w.hu.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwev.w.hu.h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu.w(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwev_d_wu_w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.d.wu.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.d.wu.w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwev_q_du_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwev.q.du.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwev.q.du.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.b(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwod_h_b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.h.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.h(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwod_w_h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.w.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.w(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwod_d_w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.d.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwod_q_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.q.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwod_h_bu(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.h.bu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwod_w_hu(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.w.hu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwod_d_wu(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.d.wu $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwod_q_du(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.q.du $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu.b(<16 x i16>, <32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmaddwod_h_bu_b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.h.bu.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaddwod.h.bu.b(<16 x i16> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu.h(<8 x i32>, <16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmaddwod_w_hu_h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.w.hu.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaddwod.w.hu.h(<8 x i32> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu.w(<4 x i64>, <8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmaddwod_d_wu_w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.d.wu.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.d.wu.w(<4 x i64> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmaddwod_q_du_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmaddwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaddwod.q.du.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaddwod.q.du.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max.ll
new file mode 100644
index 00000000000000..9cf09df4439ad9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmax.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmax_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmax.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmax.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmax_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmax.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmax.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmax_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmax.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmax.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmax_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmax.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmaxi.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvmaxi_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmaxi.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaxi.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvmaxi_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaxi.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaxi.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvmaxi_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaxi.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaxi.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvmaxi_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaxi.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmax.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmax_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmax.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmax.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmax_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmax.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmax.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmax_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmax.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmax.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmax_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmax_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmax.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmaxi.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvmaxi_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmaxi.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmaxi.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvmaxi_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmaxi.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmaxi.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvmaxi_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmaxi.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmaxi.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvmaxi_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvmaxi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmaxi.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmaxi.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min.ll
new file mode 100644
index 00000000000000..c94b1e4ea44cb7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmin.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmin_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmin.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmin.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmin_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmin.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmin.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmin_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmin.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmin.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmin_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmin.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmini.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvmini_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmini.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmini.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvmini_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmini.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmini.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvmini_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmini.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmini.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvmini_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmini.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmin.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmin_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmin.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmin.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmin_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmin.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmin.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmin_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmin.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmin.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmin_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmin_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmin.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmin.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmini.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvmini_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmini.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmini.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvmini_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmini.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmini.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvmini_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmini.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmini.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvmini_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvmini_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmini.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmini.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

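A minimal composition sketch (editorial, not part of the patch; the function name clamp_bu is made up, and only intrinsics declared in the max/min tests above are used): chaining xvmax.bu and xvmin.bu clamps each unsigned byte lane of %v into the range [%lo, %hi].

declare <32 x i8> @llvm.loongarch.lasx.xvmax.bu(<32 x i8>, <32 x i8>)
declare <32 x i8> @llvm.loongarch.lasx.xvmin.bu(<32 x i8>, <32 x i8>)

; Clamp: res[i] = min(max(v[i], lo[i]), hi[i]) for each of the 32 unsigned bytes.
define <32 x i8> @clamp_bu(<32 x i8> %v, <32 x i8> %lo, <32 x i8> %hi) nounwind {
entry:
  %t   = call <32 x i8> @llvm.loongarch.lasx.xvmax.bu(<32 x i8> %v, <32 x i8> %lo)
  %res = call <32 x i8> @llvm.loongarch.lasx.xvmin.bu(<32 x i8> %t, <32 x i8> %hi)
  ret <32 x i8> %res
}
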
diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mod.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mod.ll
new file mode 100644
index 00000000000000..a177246bb23508
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mod.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmod.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmod_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmod.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmod.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmod_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmod.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmod.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmod_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmod.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmod.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmod_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmod.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmod.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmod_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmod.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmod.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmod_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmod.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmod.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmod_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmod.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmod.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmod_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmod_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmod.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmod.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskgez.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskgez.ll
new file mode 100644
index 00000000000000..da87c20ad6ee0a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskgez.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmskgez.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvmskgez_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmskgez_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmskgez.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmskgez.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskltz.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskltz.ll
new file mode 100644
index 00000000000000..b2218487535c63
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mskltz.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmskltz.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvmskltz_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmskltz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmskltz.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmskltz.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmskltz.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvmskltz_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvmskltz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmskltz.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmskltz.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmskltz.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvmskltz_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvmskltz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmskltz.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmskltz.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmskltz.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvmskltz_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvmskltz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmskltz.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmskltz.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msknz.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msknz.ll
new file mode 100644
index 00000000000000..becd2c883a7ed7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msknz.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmsknz.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvmsknz_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvmsknz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmsknz.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmsknz.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msub.ll
new file mode 100644
index 00000000000000..c89f9578b77d7f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-msub.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmsub.b(<32 x i8>, <32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmsub_b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmsub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmsub.b $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmsub.b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmsub.h(<16 x i16>, <16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmsub_h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmsub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmsub.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmsub.h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmsub.w(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmsub_w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmsub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmsub.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmsub.w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmsub.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmsub_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvmsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmsub.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmsub.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-muh.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-muh.ll
new file mode 100644
index 00000000000000..97461512ce1665
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-muh.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmuh.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmuh_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmuh.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmuh.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmuh_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmuh.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmuh.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmuh_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmuh.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmuh.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmuh_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmuh.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmuh.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmuh_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmuh.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmuh.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmuh_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmuh.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmuh.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmuh_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmuh.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmuh.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmuh_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmuh_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmuh.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmuh.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mul.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mul.ll
new file mode 100644
index 00000000000000..d5d852e58a9f9c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mul.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvmul.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvmul_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmul_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmul.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvmul.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmul.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvmul_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmul_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmul.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmul.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmul.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvmul_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmul_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmul.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmul.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmul.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmul_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmul_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmul.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmul.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mulw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mulw.ll
new file mode 100644
index 00000000000000..f69e64aa76980e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-mulw.ll
@@ -0,0 +1,290 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwev_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwev_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwev_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwev_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwev_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwev_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwev_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwev_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwev_h_bu_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.h.bu.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.bu.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwev_w_hu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.w.hu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwev.w.hu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwev_d_wu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.d.wu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.d.wu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwev_q_du_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwev_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwev.q.du.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwev.q.du.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwod_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwod_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwod_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwod_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwod_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwod_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwod_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwod_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvmulwod_h_bu_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_h_bu_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.h.bu.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.bu.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvmulwod_w_hu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_w_hu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.w.hu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvmulwod.w.hu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvmulwod_d_wu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_d_wu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.d.wu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.d.wu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvmulwod_q_du_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvmulwod_q_du_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvmulwod.q.du.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvmulwod.q.du.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

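A minimal pairing sketch (editorial, not part of the patch; the function name mulw_full_b is made up, and only intrinsics declared in the mulw test above are used): the even and odd widening forms together yield the full 16-bit products of all 32 byte lanes, split across two <16 x i16> results.

declare <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.b(<32 x i8>, <32 x i8>)
declare <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.b(<32 x i8>, <32 x i8>)

; Even-indexed and odd-indexed lane products, widened from i8 to i16.
define { <16 x i16>, <16 x i16> } @mulw_full_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
entry:
  %ev = call <16 x i16> @llvm.loongarch.lasx.xvmulwev.h.b(<32 x i8> %va, <32 x i8> %vb)
  %od = call <16 x i16> @llvm.loongarch.lasx.xvmulwod.h.b(<32 x i8> %va, <32 x i8> %vb)
  %r0 = insertvalue { <16 x i16>, <16 x i16> } undef, <16 x i16> %ev, 0
  %r1 = insertvalue { <16 x i16>, <16 x i16> } %r0, <16 x i16> %od, 1
  ret { <16 x i16>, <16 x i16> } %r1
}
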
diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-neg.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-neg.ll
new file mode 100644
index 00000000000000..ecbedf33465787
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-neg.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvneg.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvneg_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvneg_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvneg.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvneg.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvneg.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvneg_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvneg_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvneg.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvneg.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvneg.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvneg_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvneg_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvneg.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvneg.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvneg.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvneg_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvneg_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvneg.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvneg.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nor.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nor.ll
new file mode 100644
index 00000000000000..674746b7624ec6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nor.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvnor.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvnor_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvnor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvnor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvnor.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nori.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nori.ll
new file mode 100644
index 00000000000000..55eebf87ee921e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-nori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvnori.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvnori_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvnori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvnori.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvnori.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-or.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-or.ll
new file mode 100644
index 00000000000000..16462cfafc54a1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-or.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvor.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvor_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvor.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ori.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ori.ll
new file mode 100644
index 00000000000000..8e53d88bac3746
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvori.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvori_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvori.b $xr0, $xr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvori.b(<32 x i8> %va, i32 3)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-orn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-orn.ll
new file mode 100644
index 00000000000000..3a335cdd371670
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-orn.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvorn.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvorn_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvorn_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvorn.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvorn.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pack.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pack.ll
new file mode 100644
index 00000000000000..512b3023491720
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pack.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpackev.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvpackev_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackev.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpackev.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvpackev.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvpackev_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackev.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvpackev.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpackev.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvpackev_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackev.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpackev.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpackev.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvpackev_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackev.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpackev.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpackod.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvpackod_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackod.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpackod.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvpackod.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvpackod_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackod.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvpackod.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpackod.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvpackod_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackod.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpackod.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpackod.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvpackod_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpackod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpackod.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpackod.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pcnt.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pcnt.ll
new file mode 100644
index 00000000000000..d77f1d2082c8d7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pcnt.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpcnt.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvpcnt_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvpcnt_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpcnt.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpcnt.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvpcnt.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvpcnt_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvpcnt_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpcnt.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvpcnt.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpcnt.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvpcnt_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvpcnt_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpcnt.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpcnt.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpcnt.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvpcnt_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvpcnt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpcnt.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpcnt.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-perm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-perm.ll
new file mode 100644
index 00000000000000..4ec434edd4ec73
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-perm.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvperm.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvperm_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvperm_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvperm.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvperm.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
new file mode 100644
index 00000000000000..0d9f9daabc4488
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-permi.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpermi.w(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvpermi_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpermi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpermi.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpermi.w(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpermi.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvpermi_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvpermi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpermi.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpermi.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvpermi_q(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpermi_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpermi.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpermi.q(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pick.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pick.ll
new file mode 100644
index 00000000000000..bbd6d693ca0b32
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pick.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpickev.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvpickev_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickev_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickev.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpickev.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvpickev.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvpickev_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickev_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickev.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvpickev.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpickev.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvpickev_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickev_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickev.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpickev.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpickev.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvpickev_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickev_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickev.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpickev.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvpickod.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvpickod_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickod_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickod.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvpickod.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvpickod.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvpickod_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickod_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickod.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvpickod.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpickod.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvpickod_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickod_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickod.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpickod.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpickod.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvpickod_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvpickod_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickod.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpickod.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve.ll
new file mode 100644
index 00000000000000..546777bc72ab48
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x i32> @llvm.loongarch.lasx.xvpickve.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvpickve_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvpickve.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvpickve.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvpickve_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvpickve.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <8 x float> @llvm.loongarch.lasx.xvpickve.w.f(<8 x float>, i32)
+
+define <8 x float> @lasx_xvpickve_w_f(<8 x float> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve_w_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x float> @llvm.loongarch.lasx.xvpickve.w.f(<8 x float> %va, i32 1)
+  ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.loongarch.lasx.xvpickve.d.f(<4 x double>, i32)
+
+define <4 x double> @lasx_xvpickve_d_f(<4 x double> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve_d_f:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x double> @llvm.loongarch.lasx.xvpickve.d.f(<4 x double> %va, i32 1)
+  ret <4 x double> %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll
new file mode 100644
index 00000000000000..0617e7424321bd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll
@@ -0,0 +1,53 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+
+
+
+declare i32 @llvm.loongarch.lasx.xvpickve2gr.w(<8 x i32>, i32)
+
+define i32 @lasx_xvpickve2gr_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve2gr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve2gr.w $a0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xvpickve2gr.w(<8 x i32> %va, i32 1)
+  ret i32 %res
+}
+
+declare i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64>, i32)
+
+define i64 @lasx_xvpickve2gr_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve2gr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 1)
+  ret i64 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32>, i32)
+
+define i32 @lasx_xvpickve2gr_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve2gr_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve2gr.wu $a0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32> %va, i32 1)
+  ret i32 %res
+}
+
+declare i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64>, i32)
+
+define i64 @lasx_xvpickve2gr_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvpickve2gr_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvpickve2gr.du $a0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 1)
+  ret i64 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl128vei.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl128vei.ll
new file mode 100644
index 00000000000000..25fab44f461f56
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl128vei.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvrepl128vei.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvrepl128vei_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvrepl128vei_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepl128vei.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvrepl128vei.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvrepl128vei.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvrepl128vei_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvrepl128vei_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepl128vei.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvrepl128vei.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvrepl128vei.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvrepl128vei_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvrepl128vei_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepl128vei.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvrepl128vei.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvrepl128vei.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvrepl128vei_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvrepl128vei_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrepl128vei.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvrepl128vei.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll
new file mode 100644
index 00000000000000..c71abd2205c671
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvreplgr2vr.b(i32)
+
+define <32 x i8> @lasx_xvreplgr2vr_b(i32 %a) nounwind {
+; CHECK-LABEL: lasx_xvreplgr2vr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplgr2vr.b $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvreplgr2vr.b(i32 %a)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvreplgr2vr.h(i32)
+
+define <16 x i16> @lasx_xvreplgr2vr_h(i32 %a) nounwind {
+; CHECK-LABEL: lasx_xvreplgr2vr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplgr2vr.h $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvreplgr2vr.h(i32 %a)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvreplgr2vr.w(i32)
+
+define <8 x i32> @lasx_xvreplgr2vr_w(i32 %a) nounwind {
+; CHECK-LABEL: lasx_xvreplgr2vr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplgr2vr.w $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvreplgr2vr.w(i32 %a)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64)
+
+define <4 x i64> @lasx_xvreplgr2vr_d(i64 %a) nounwind {
+; CHECK-LABEL: lasx_xvreplgr2vr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplgr2vr.d $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 %a)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve.ll
new file mode 100644
index 00000000000000..21d36ff7bb5ee0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvreplve.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvreplve_b(<32 x i8> %va, i32 %b) nounwind {
+; CHECK-LABEL: lasx_xvreplve_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve.b $xr0, $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvreplve.b(<32 x i8> %va, i32 %b)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvreplve.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvreplve_h(<16 x i16> %va, i32 %b) nounwind {
+; CHECK-LABEL: lasx_xvreplve_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve.h $xr0, $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvreplve.h(<16 x i16> %va, i32 %b)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvreplve.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvreplve_w(<8 x i32> %va, i32 %b) nounwind {
+; CHECK-LABEL: lasx_xvreplve_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve.w $xr0, $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvreplve.w(<8 x i32> %va, i32 %b)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvreplve.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvreplve_d(<4 x i64> %va, i32 %b) nounwind {
+; CHECK-LABEL: lasx_xvreplve_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve.d $xr0, $xr0, $a0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvreplve.d(<4 x i64> %va, i32 %b)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve0.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve0.ll
new file mode 100644
index 00000000000000..7996bb36ef03cb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replve0.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvreplve0.b(<32 x i8>)
+
+define <32 x i8> @lasx_xvreplve0_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvreplve0_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve0.b $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvreplve0.b(<32 x i8> %va)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvreplve0.h(<16 x i16>)
+
+define <16 x i16> @lasx_xvreplve0_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvreplve0_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve0.h $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvreplve0.h(<16 x i16> %va)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvreplve0.w(<8 x i32>)
+
+define <8 x i32> @lasx_xvreplve0_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvreplve0_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve0.w $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvreplve0.w(<8 x i32> %va)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvreplve0.d(<4 x i64>)
+
+define <4 x i64> @lasx_xvreplve0_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvreplve0_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve0.d $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvreplve0.d(<4 x i64> %va)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvreplve0.q(<32 x i8>)
+
+define <32 x i8> @lasx_xvreplve0_q(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvreplve0_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvreplve0.q $xr0, $xr0
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvreplve0.q(<32 x i8> %va)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-rotr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-rotr.ll
new file mode 100644
index 00000000000000..64d2773864e9f0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-rotr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvrotr.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvrotr_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvrotr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotr.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvrotr.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvrotr.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvrotr_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvrotr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotr.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvrotr.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvrotr.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvrotr_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvrotr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotr.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvrotr.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvrotr.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvrotr_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvrotr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotr.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvrotr.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvrotri.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvrotri_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvrotri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotri.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvrotri.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvrotri.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvrotri_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvrotri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotri.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvrotri.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvrotri.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvrotri_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvrotri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotri.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvrotri.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvrotri.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvrotri_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvrotri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvrotri.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvrotri.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sadd.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sadd.ll
new file mode 100644
index 00000000000000..54a5e2e9c8332d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sadd.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsadd.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsadd_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsadd.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsadd.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsadd_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsadd.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsadd.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsadd_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsadd.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsadd.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsadd_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsadd.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsadd.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsadd_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsadd.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsadd.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsadd_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsadd.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsadd.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsadd_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsadd.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsadd.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsadd_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsadd_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsadd.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsadd.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sat.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sat.ll
new file mode 100644
index 00000000000000..293b9dc9eb4d9f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sat.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsat.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsat_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsat.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsat.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsat_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsat.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsat.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsat_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsat.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsat.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsat_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsat.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsat.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsat_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsat.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsat.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsat_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsat.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsat.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsat_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsat.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsat.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsat_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsat_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsat.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsat.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-seq.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-seq.ll
new file mode 100644
index 00000000000000..83bc93c88c73c3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-seq.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvseq.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvseq_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvseq_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseq.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvseq.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvseq.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvseq_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvseq_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseq.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvseq.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvseq.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvseq_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvseq_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseq.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvseq.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvseq.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvseq_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvseq_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseq.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvseq.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvseqi.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvseqi_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvseqi_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseqi.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvseqi.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvseqi.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvseqi_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvseqi_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseqi.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvseqi.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvseqi.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvseqi_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvseqi_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseqi.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvseqi.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvseqi.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvseqi_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvseqi_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseqi.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvseqi.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll
new file mode 100644
index 00000000000000..6e3e2e0330f525
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lasx.xbz.v(<32 x i8>)
+
+define i32 @lasx_xbz_v(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xbz_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvseteqz.v $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbz.v(<32 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbnz.v(<32 x i8>)
+
+define i32 @lasx_xbnz_v(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xbnz_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetnez.v $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbnz.v(<32 x i8> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll
new file mode 100644
index 00000000000000..a466b78bf8d2d0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lasx.xbnz.b(<32 x i8>)
+
+define i32 @lasx_xbnz_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xbnz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetallnez.b $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbnz.b(<32 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbnz.h(<16 x i16>)
+
+define i32 @lasx_xbnz_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xbnz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetallnez.h $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbnz.h(<16 x i16> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbnz.w(<8 x i32>)
+
+define i32 @lasx_xbnz_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xbnz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetallnez.w $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB2_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbnz.w(<8 x i32> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbnz.d(<4 x i64>)
+
+define i32 @lasx_xbnz_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xbnz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetallnez.d $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB3_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbnz.d(<4 x i64> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll
new file mode 100644
index 00000000000000..36e65fc5b32811
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare i32 @llvm.loongarch.lasx.xbz.b(<32 x i8>)
+
+define i32 @lasx_xbz_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xbz_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetanyeqz.b $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbz.b(<32 x i8> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbz.h(<16 x i16>)
+
+define i32 @lasx_xbz_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xbz_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetanyeqz.h $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbz.h(<16 x i16> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbz.w(<8 x i32>)
+
+define i32 @lasx_xbz_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xbz_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetanyeqz.w $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB2_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbz.w(<8 x i32> %va)
+  ret i32 %res
+}
+
+declare i32 @llvm.loongarch.lasx.xbz.d(<4 x i64>)
+
+define i32 @lasx_xbz_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xbz_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsetanyeqz.d $fcc0, $xr0
+; CHECK-NEXT:    bcnez $fcc0, .LBB3_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    addi.w $a0, $zero, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call i32 @llvm.loongarch.lasx.xbz.d(<4 x i64> %va)
+  ret i32 %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf.ll
new file mode 100644
index 00000000000000..9b9140f6ad6217
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvshuf.b(<32 x i8>, <32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvshuf_b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc) nounwind {
+; CHECK-LABEL: lasx_xvshuf_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf.b $xr0, $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvshuf.b(<32 x i8> %va, <32 x i8> %vb, <32 x i8> %vc)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvshuf.h(<16 x i16>, <16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvshuf_h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc) nounwind {
+; CHECK-LABEL: lasx_xvshuf_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf.h $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvshuf.h(<16 x i16> %va, <16 x i16> %vb, <16 x i16> %vc)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvshuf.w(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvshuf_w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc) nounwind {
+; CHECK-LABEL: lasx_xvshuf_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf.w $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvshuf.w(<8 x i32> %va, <8 x i32> %vb, <8 x i32> %vc)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvshuf.d(<4 x i64>, <4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvshuf_d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc) nounwind {
+; CHECK-LABEL: lasx_xvshuf_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf.d $xr0, $xr1, $xr2
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvshuf.d(<4 x i64> %va, <4 x i64> %vb, <4 x i64> %vc)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf4i.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf4i.ll
new file mode 100644
index 00000000000000..31205086759c40
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-shuf4i.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvshuf4i.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvshuf4i_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvshuf4i_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf4i.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvshuf4i.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvshuf4i.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvshuf4i_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvshuf4i_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf4i.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvshuf4i.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvshuf4i.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvshuf4i_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvshuf4i_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf4i.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvshuf4i.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvshuf4i.d(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvshuf4i_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvshuf4i_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvshuf4i.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvshuf4i.d(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-signcov.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-signcov.ll
new file mode 100644
index 00000000000000..e6c6d8ccd0d350
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-signcov.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsigncov.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsigncov_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsigncov_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsigncov.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsigncov.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsigncov.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsigncov_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsigncov_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsigncov.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsigncov.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsigncov.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsigncov_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsigncov_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsigncov.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsigncov.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsigncov.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsigncov_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsigncov_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsigncov.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsigncov.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sle.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sle.ll
new file mode 100644
index 00000000000000..8895efc84b845d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sle.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsle.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsle_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsle.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsle.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsle_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsle.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsle.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsle_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsle.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsle.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsle_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsle.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslei.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvslei_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslei.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslei.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvslei_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslei.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslei.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvslei_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslei.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslei.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvslei_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslei.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsle.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsle_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsle.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsle.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsle_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsle.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsle.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsle_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsle.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsle.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsle_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsle_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsle.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsle.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslei.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvslei_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslei.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslei.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvslei_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslei.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslei.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvslei_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslei.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslei.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvslei_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvslei_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslei.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslei.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sll.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sll.ll
new file mode 100644
index 00000000000000..14110b613dbe30
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sll.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsll.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsll_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsll_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsll.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsll.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsll.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsll_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsll_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsll.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsll.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsll.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsll_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsll_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsll.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsll.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsll.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsll_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsll_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsll.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsll.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslli.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvslli_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvslli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslli.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslli.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslli.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvslli_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvslli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslli.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslli.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslli.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvslli_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvslli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslli.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslli.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslli.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvslli_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvslli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslli.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslli.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sllwil.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sllwil.ll
new file mode 100644
index 00000000000000..a72b8a6cbb4f41
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sllwil.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsllwil.h.b(<32 x i8>, i32)
+
+define <16 x i16> @lasx_xvsllwil_h_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.h.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsllwil.h.b(<32 x i8> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsllwil.w.h(<16 x i16>, i32)
+
+define <8 x i32> @lasx_xvsllwil_w_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.w.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsllwil.w.h(<16 x i16> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsllwil.d.w(<8 x i32>, i32)
+
+define <4 x i64> @lasx_xvsllwil_d_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.d.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsllwil.d.w(<8 x i32> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsllwil.hu.bu(<32 x i8>, i32)
+
+define <16 x i16> @lasx_xvsllwil_hu_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_hu_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.hu.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsllwil.hu.bu(<32 x i8> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsllwil.wu.hu(<16 x i16>, i32)
+
+define <8 x i32> @lasx_xvsllwil_wu_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_wu_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.wu.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsllwil.wu.hu(<16 x i16> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsllwil.du.wu(<8 x i32>, i32)
+
+define <4 x i64> @lasx_xvsllwil_du_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsllwil_du_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsllwil.du.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsllwil.du.wu(<8 x i32> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-slt.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-slt.ll
new file mode 100644
index 00000000000000..3ea87adff110a2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-slt.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslt.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvslt_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslt.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslt.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvslt_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslt.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslt.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvslt_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslt.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslt.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvslt_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslt.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslti.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvslti_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslti.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslti.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvslti_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslti.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslti.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvslti_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslti.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslti.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvslti_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslti.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslt.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvslt_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslt.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslt.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvslt_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslt.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslt.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvslt_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslt.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslt.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvslt_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvslt_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslt.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslt.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvslti.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvslti_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvslti.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvslti.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvslti_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvslti.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvslti.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvslti_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvslti.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvslti.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvslti_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvslti_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvslti.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvslti.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sra.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sra.ll
new file mode 100644
index 00000000000000..a7498682559bd3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sra.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsra.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsra_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsra_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsra.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsra.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsra.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsra_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsra_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsra.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsra.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsra.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsra_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsra_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsra.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsra.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsra.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsra_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsra_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsra.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsra.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrai.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrai_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrai_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrai.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrai.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrai_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrai_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrai.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrai.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrai_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrai_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrai.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrai.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrai_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrai_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrai.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sran.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sran.ll
new file mode 100644
index 00000000000000..f59ae4c196621f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sran.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsran.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvsran_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsran_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsran.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsran.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsran.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvsran_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsran_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsran.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsran.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsran.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvsran_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsran_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsran.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsran.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srani.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srani.ll
new file mode 100644
index 00000000000000..91fb90da9c525b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srani.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrani.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrani_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrani_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrani.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrani.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrani.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrani_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrani_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrani.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrani.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrani.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrani_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrani_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrani.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrani.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrani.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrani_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrani_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrani.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrani.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srar.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srar.ll
new file mode 100644
index 00000000000000..e2c160557c4dc0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srar.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrar.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsrar_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrar_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrar.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrar.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrar.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsrar_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrar_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrar.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrar.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrar.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsrar_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrar_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrar.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrar.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrar.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsrar_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrar_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrar.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrar.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrari.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrari_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrari_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrari.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrari.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrari.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrari_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrari_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrari.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrari.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrari.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrari_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrari_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrari.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrari.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrari.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrari_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrari_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrari.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrari.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarn.ll
new file mode 100644
index 00000000000000..02dd989773ca11
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarn.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrarn.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvsrarn_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarn.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrarn.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrarn.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvsrarn_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarn.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrarn.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrarn.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvsrarn_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarn.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrarn.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarni.ll
new file mode 100644
index 00000000000000..a7d2c37397936c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srarni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrarni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrarni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrarni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrarni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrarni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrarni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrarni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrarni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrarni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrarni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrarni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrarni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrarni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrarni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srl.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srl.ll
new file mode 100644
index 00000000000000..7b2992f2ca3bca
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srl.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrl.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsrl_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrl_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrl.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrl.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrl.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsrl_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrl_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrl.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrl.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrl.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsrl_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrl_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrl.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrl.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrl.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsrl_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrl_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrl.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrl.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrli.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrli_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrli_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrli.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrli.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrli_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrli_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrli.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrli.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrli_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrli_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrli.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrli.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrli_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrli_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrli.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srln.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srln.ll
new file mode 100644
index 00000000000000..dc5c0e016ea0a7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srln.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrln.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvsrln_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrln_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrln.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrln.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrln.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvsrln_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrln_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrln.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrln.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrln.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvsrln_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrln_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrln.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrln.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlni.ll
new file mode 100644
index 00000000000000..0301ebb195e266
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrlni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrlni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrlni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrlni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrlni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrlni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrlni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrlni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrlni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrlni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrlni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrlni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlr.ll
new file mode 100644
index 00000000000000..e04504158e2746
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlr.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrlr.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsrlr_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlr_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlr.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrlr.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrlr.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsrlr_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlr_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlr.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrlr.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrlr.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsrlr_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlr_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlr.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrlr.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrlr.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsrlr_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlr_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlr.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrlr.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrlri.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrlri_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrlri_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlri.b $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrlri.b(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrlri.h(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrlri_h(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrlri_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlri.h $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrlri.h(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrlri.w(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrlri_w(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrlri_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlri.w $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrlri.w(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrlri.d(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrlri_d(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsrlri_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlri.d $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrlri.d(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrn.ll
new file mode 100644
index 00000000000000..1e7df379c6e1e0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrn.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrlrn.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvsrlrn_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrn.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrlrn.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrlrn.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvsrlrn_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrn.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrlrn.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrlrn.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvsrlrn_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrn.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrlrn.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrni.ll
new file mode 100644
index 00000000000000..56dbafe8b1ac38
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-srlrni.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsrlrni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsrlrni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsrlrni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsrlrni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsrlrni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsrlrni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsrlrni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsrlrni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsrlrni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsrlrni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsrlrni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsrlrni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsrlrni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsrlrni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssran.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssran.ll
new file mode 100644
index 00000000000000..da1857dad14512
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssran.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssran.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssran_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssran.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssran.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssran_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssran.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssran.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssran_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssran.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssran.bu.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssran_bu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.bu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssran.bu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssran.hu.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssran_hu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.hu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssran.hu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssran.wu.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssran_wu_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssran_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssran.wu.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssran.wu.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrani.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrani.ll
new file mode 100644
index 00000000000000..9efa659b4a1e0f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrani.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrani.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrani_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrani.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrani.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrani_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrani.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrani.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrani_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrani.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrani.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrani_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrani.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrani.bu.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrani_bu_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.bu.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrani.bu.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrani.hu.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrani_hu_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.hu.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrani.hu.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrani.wu.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrani_wu_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.wu.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrani.wu.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrani.du.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrani_du_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrani_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrani.du.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrani.du.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarn.ll
new file mode 100644
index 00000000000000..b5d59ff06f4d14
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarn.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrarn.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrarn_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrarn.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrarn.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrarn_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrarn.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrarn.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrarn_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrarn.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrarn.bu.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrarn_bu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.bu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrarn.bu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrarn.hu.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrarn_hu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.hu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrarn.hu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrarn.wu.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrarn_wu_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarn_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarn.wu.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrarn.wu.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarni.ll
new file mode 100644
index 00000000000000..da411dad645bbd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrarni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrarni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrarni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrarni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrarni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrarni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrarni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrarni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrarni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrarni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrarni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrarni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrarni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrarni.bu.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrarni_bu_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.bu.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrarni.bu.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrarni.hu.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrarni_hu_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.hu.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrarni.hu.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrarni.wu.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrarni_wu_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.wu.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrarni.wu.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrarni.du.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrarni_du_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrarni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrarni.du.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrarni.du.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrln.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrln.ll
new file mode 100644
index 00000000000000..c60b5bdf81a03a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrln.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrln.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrln_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrln.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrln.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrln_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrln.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrln.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrln_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrln.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrln.bu.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrln_bu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.bu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrln.bu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrln.hu.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrln_hu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.hu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrln.hu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrln.wu.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrln_wu_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrln_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrln.wu.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrln.wu.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlni.ll
new file mode 100644
index 00000000000000..e57dd426bde8ce
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrlni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrlni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrlni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrlni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrlni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrlni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlni.bu.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrlni_bu_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.bu.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlni.bu.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlni.hu.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrlni_hu_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.hu.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlni.hu.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlni.wu.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrlni_wu_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.wu.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlni.wu.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrlni.du.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrlni_du_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlni.du.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrlni.du.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrn.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrn.ll
new file mode 100644
index 00000000000000..774cf1bd5e8497
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrn.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlrn.b.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrlrn_b_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.b.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlrn.b.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlrn.h.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrlrn_h_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.h.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlrn.h.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlrn.w.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrlrn_w_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.w.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlrn.w.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlrn.bu.h(<16 x i16>, <16 x i16>)
+
+define <32 x i8> @lasx_xvssrlrn_bu_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.bu.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlrn.bu.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlrn.hu.w(<8 x i32>, <8 x i32>)
+
+define <16 x i16> @lasx_xvssrlrn_hu_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.hu.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlrn.hu.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlrn.wu.d(<4 x i64>, <4 x i64>)
+
+define <8 x i32> @lasx_xvssrlrn_wu_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrn_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrn.wu.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlrn.wu.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <8 x i32> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrni.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrni.ll
new file mode 100644
index 00000000000000..9a80516d8d7838
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssrlrni.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlrni.b.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrlrni_b_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_b_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.b.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlrni.b.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlrni.h.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrlrni_h_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_h_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.h.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlrni.h.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlrni.w.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrlrni_w_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_w_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.w.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlrni.w.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrlrni.d.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrlrni_d_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_d_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.d.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrlrni.d.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssrlrni.bu.h(<32 x i8>, <32 x i8>, i32)
+
+define <32 x i8> @lasx_xvssrlrni_bu_h(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_bu_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.bu.h $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssrlrni.bu.h(<32 x i8> %va, <32 x i8> %vb, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssrlrni.hu.w(<16 x i16>, <16 x i16>, i32)
+
+define <16 x i16> @lasx_xvssrlrni_hu_w(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_hu_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.hu.w $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssrlrni.hu.w(<16 x i16> %va, <16 x i16> %vb, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssrlrni.wu.d(<8 x i32>, <8 x i32>, i32)
+
+define <8 x i32> @lasx_xvssrlrni_wu_d(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_wu_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.wu.d $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssrlrni.wu.d(<8 x i32> %va, <8 x i32> %vb, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssrlrni.du.q(<4 x i64>, <4 x i64>, i32)
+
+define <4 x i64> @lasx_xvssrlrni_du_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssrlrni_du_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssrlrni.du.q $xr0, $xr1, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssrlrni.du.q(<4 x i64> %va, <4 x i64> %vb, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssub.ll
new file mode 100644
index 00000000000000..cd3ccd9f526250
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-ssub.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssub.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvssub_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssub.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssub.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvssub_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssub.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssub.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvssub_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssub.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssub.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvssub_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssub.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <32 x i8> @llvm.loongarch.lasx.xvssub.bu(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvssub_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvssub.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvssub.hu(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvssub_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvssub.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvssub.wu(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvssub_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvssub.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvssub.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvssub_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvssub_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvssub.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvssub.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
new file mode 100644
index 00000000000000..b69e7b813f0c1d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-st.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare void @llvm.loongarch.lasx.xvst(<32 x i8>, i8*, i32)
+
+define void @lasx_xvst(<32 x i8> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvst:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvst $xr0, $a0, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvst(<32 x i8> %va, i8* %p, i32 1)
+  ret void
+}
+
+declare void @llvm.loongarch.lasx.xvstx(<32 x i8>, i8*, i64)
+
+define void @lasx_xvstx(<32 x i8> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvstx:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a1, $zero, 1
+; CHECK-NEXT:    xvstx $xr0, $a0, $a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvstx(<32 x i8> %va, i8* %p, i64 1)
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
new file mode 100644
index 00000000000000..52ef3c4714127d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-stelm.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare void @llvm.loongarch.lasx.xvstelm.b(<32 x i8>, i8*, i32, i32)
+
+define void @lasx_xvstelm_b(<32 x i8> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvstelm_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvstelm.b $xr0, $a0, 1, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvstelm.b(<32 x i8> %va, i8* %p, i32 1, i32 1)
+  ret void
+}
+
+declare void @llvm.loongarch.lasx.xvstelm.h(<16 x i16>, i8*, i32, i32)
+
+define void @lasx_xvstelm_h(<16 x i16> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvstelm_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvstelm.h $xr0, $a0, 2, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvstelm.h(<16 x i16> %va, i8* %p, i32 2, i32 1)
+  ret void
+}
+
+declare void @llvm.loongarch.lasx.xvstelm.w(<8 x i32>, i8*, i32, i32)
+
+define void @lasx_xvstelm_w(<8 x i32> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvstelm_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvstelm.w $xr0, $a0, 4, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvstelm.w(<8 x i32> %va, i8* %p, i32 4, i32 1)
+  ret void
+}
+
+declare void @llvm.loongarch.lasx.xvstelm.d(<4 x i64>, i8*, i32, i32)
+
+define void @lasx_xvstelm_d(<4 x i64> %va, i8* %p) nounwind {
+; CHECK-LABEL: lasx_xvstelm_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvstelm.d $xr0, $a0, 8, 1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.loongarch.lasx.xvstelm.d(<4 x i64> %va, i8* %p, i32 8, i32 1)
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sub.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sub.ll
new file mode 100644
index 00000000000000..4d69dd83dcde7f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-sub.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsub.b(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvsub_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsub_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsub.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsub.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsub.h(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @lasx_xvsub_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsub_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsub.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsub.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsub.w(<8 x i32>, <8 x i32>)
+
+define <8 x i32> @lasx_xvsub_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsub_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsub.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsub.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsub.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsub_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsub_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsub.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsub.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsub.q(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsub_q(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsub_q:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsub.q $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsub.q(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subi.ll
new file mode 100644
index 00000000000000..cc3235ff4657d4
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subi.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvsubi.bu(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvsubi_bu(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvsubi_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubi.bu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvsubi.bu(<32 x i8> %va, i32 1)
+  ret <32 x i8> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsubi.hu(<16 x i16>, i32)
+
+define <16 x i16> @lasx_xvsubi_hu(<16 x i16> %va) nounwind {
+; CHECK-LABEL: lasx_xvsubi_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubi.hu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsubi.hu(<16 x i16> %va, i32 1)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsubi.wu(<8 x i32>, i32)
+
+define <8 x i32> @lasx_xvsubi_wu(<8 x i32> %va) nounwind {
+; CHECK-LABEL: lasx_xvsubi_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubi.wu $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsubi.wu(<8 x i32> %va, i32 1)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubi.du(<4 x i64>, i32)
+
+define <4 x i64> @lasx_xvsubi_du(<4 x i64> %va) nounwind {
+; CHECK-LABEL: lasx_xvsubi_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubi.du $xr0, $xr0, 1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubi.du(<4 x i64> %va, i32 1)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subw.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subw.ll
new file mode 100644
index 00000000000000..6f203e8949900b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-subw.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvsubwev_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvsubwev_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvsubwev_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsubwev_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvsubwev_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwev.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvsubwev_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwev.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvsubwev_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsubwev_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwev_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwev.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwev.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.b(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvsubwod_h_b(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_h_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.h.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.b(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.h(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvsubwod_w_h(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_w_h:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.w.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.h(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.w(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvsubwod_d_w(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_d_w:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.d.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.w(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.d(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsubwod_q_d(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_q_d:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.q.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.d(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}
+
+declare <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.bu(<32 x i8>, <32 x i8>)
+
+define <16 x i16> @lasx_xvsubwod_h_bu(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_h_bu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.h.bu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <16 x i16> @llvm.loongarch.lasx.xvsubwod.h.bu(<32 x i8> %va, <32 x i8> %vb)
+  ret <16 x i16> %res
+}
+
+declare <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.hu(<16 x i16>, <16 x i16>)
+
+define <8 x i32> @lasx_xvsubwod_w_hu(<16 x i16> %va, <16 x i16> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_w_hu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.w.hu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <8 x i32> @llvm.loongarch.lasx.xvsubwod.w.hu(<16 x i16> %va, <16 x i16> %vb)
+  ret <8 x i32> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.wu(<8 x i32>, <8 x i32>)
+
+define <4 x i64> @lasx_xvsubwod_d_wu(<8 x i32> %va, <8 x i32> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_d_wu:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.d.wu $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.d.wu(<8 x i32> %va, <8 x i32> %vb)
+  ret <4 x i64> %res
+}
+
+declare <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.du(<4 x i64>, <4 x i64>)
+
+define <4 x i64> @lasx_xvsubwod_q_du(<4 x i64> %va, <4 x i64> %vb) nounwind {
+; CHECK-LABEL: lasx_xvsubwod_q_du:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvsubwod.q.du $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <4 x i64> @llvm.loongarch.lasx.xvsubwod.q.du(<4 x i64> %va, <4 x i64> %vb)
+  ret <4 x i64> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xor.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xor.ll
new file mode 100644
index 00000000000000..6395b3d6f2e7a8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xor.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvxor.v(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @lasx_xvxor_v(<32 x i8> %va, <32 x i8> %vb) nounwind {
+; CHECK-LABEL: lasx_xvxor_v:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvxor.v $xr0, $xr0, $xr1
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvxor.v(<32 x i8> %va, <32 x i8> %vb)
+  ret <32 x i8> %res
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xori.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xori.ll
new file mode 100644
index 00000000000000..c71d7e7311656c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-xori.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <32 x i8> @llvm.loongarch.lasx.xvxori.b(<32 x i8>, i32)
+
+define <32 x i8> @lasx_xvxori_b(<32 x i8> %va) nounwind {
+; CHECK-LABEL: lasx_xvxori_b:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvxori.b $xr0, $xr0, 3
+; CHECK-NEXT:    ret
+entry:
+  %res = call <32 x i8> @llvm.loongarch.lasx.xvxori.b(<32 x i8> %va, i32 3)
+  ret <32 x i8> %res
+}
