[llvm] dbbc7c3 - [LoongArch] Add some binary IR instruction testcases for LASX (#74031)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 30 21:14:18 PST 2023


Author: leecheechen
Date: 2023-12-01T13:14:11+08:00
New Revision: dbbc7c31c8e55d72dc243b244e386a25132e7215

URL: https://github.com/llvm/llvm-project/commit/dbbc7c31c8e55d72dc243b244e386a25132e7215
DIFF: https://github.com/llvm/llvm-project/commit/dbbc7c31c8e55d72dc243b244e386a25132e7215.diff

LOG: [LoongArch] Add some binary IR instruction testcases for LASX (#74031)

The IR instructions include:
- Binary Operations: add fadd sub fsub mul fmul udiv sdiv fdiv
- Bitwise Binary Operations: shl lshr ashr
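
All twelve tests share one shape: load one or two 256-bit LASX vectors, apply
a single IR operation, and store the result, so each function pins down exactly
one instruction-selection pattern, with the CHECK lines autogenerated as the
NOTE headers below state. A generic skeleton (illustrative only; <OP> stands
for any of the operations listed above and is not valid IR by itself):

  define void @op_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
  entry:
    %v0 = load <32 x i8>, ptr %a0
    %v1 = load <32 x i8>, ptr %a1
    %v2 = <OP> <32 x i8> %v0, %v1   ; e.g. add, sub, mul, sdiv, shl, ...
    store <32 x i8> %v2, ptr %res
    ret void
  }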

Added: 
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/add.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ashr.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fadd.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fmul.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fsub.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/lshr.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/mul.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sdiv.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shl.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sub.ll
    llvm/test/CodeGen/LoongArch/lasx/ir-instruction/udiv.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/add.ll
new file mode 100644
index 000000000000000..8e4d0dc6f1c380f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/add.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @add_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvadd.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = add <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @add_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvadd.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = add <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @add_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvadd.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = add <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @add_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvadd.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = add <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @add_v32i8_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v32i8_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvaddi.bu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = add <32 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @add_v16i16_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v16i16_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvaddi.hu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = add <16 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @add_v8i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v8i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvaddi.wu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = add <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @add_v4i64_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v4i64_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvaddi.du $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = add <4 x i64> %v0, <i64 31, i64 31, i64 31, i64 31>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
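
A note on the immediate forms above: xvaddi.{bu,hu,wu,du} takes an unsigned
5-bit immediate, so the splat addend 31 used in these tests is the largest
value that still folds into the instruction (xvsubi in sub.ll below is
analogous). Per lane, the pattern is simply (illustrative scalar model, not
part of the patch):

  define i8 @add_lane_31(i8 %x) {
    %r = add i8 %x, 31    ; splat <32 x i8> of 31 -> xvaddi.bu $xr, $xr, 31
    ret i8 %r
  }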

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ashr.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ashr.ll
new file mode 100644
index 000000000000000..fcbf0f1400fe61f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/ashr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @ashr_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsra.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = ashr <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsra.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = ashr <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsra.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = ashr <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsra.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = ashr <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v32i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v32i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = ashr <32 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v32i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v32i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.b $xr0, $xr0, 7
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = ashr <32 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v16i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = ashr <16 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v16i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.h $xr0, $xr0, 15
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = ashr <16 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = ashr <8 x i32> %v0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.w $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = ashr <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = ashr <4 x i64> %v0, <i64 1, i64 1, i64 1, i64 1>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.d $xr0, $xr0, 63
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = ashr <4 x i64> %v0, <i64 63, i64 63, i64 63, i64 63>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
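
The immediate shifts above use xvsrai, whose shift amount is at most the
element width minus one, so the tests probe both ends of the range: 1 and
bits-1. At bits-1 an arithmetic shift broadcasts the sign bit across the lane,
a property the sdiv tests further down reuse as a sign mask. Per-lane model
(illustrative only):

  define i8 @ashr_lane_7(i8 %x) {
    %r = ashr i8 %x, 7    ; 0 for non-negative %x, -1 for negative -> xvsrai.b
    ret i8 %r
  }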

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fadd.ll
new file mode 100644
index 000000000000000..365bb305fc5aaa3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fadd.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @fadd_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfadd.s $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %v1 = load <8 x float>, ptr %a1
+  %v2 = fadd <8 x float> %v0, %v1
+  store <8 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fadd_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfadd.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %v1 = load <4 x double>, ptr %a1
+  %v2 = fadd <4 x double> %v0, %v1
+  store <4 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll
new file mode 100644
index 000000000000000..284121a79a492d0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @fdiv_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfdiv.s $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %v1 = load <8 x float>, ptr %a1
+  %v2 = fdiv <8 x float> %v0, %v1
+  store <8 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fdiv_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfdiv.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %v1 = load <4 x double>, ptr %a1
+  %v2 = fdiv <4 x double> %v0, %v1
+  store <4 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fmul.ll
new file mode 100644
index 000000000000000..a48dca8d284704b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fmul.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @fmul_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfmul.s $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %v1 = load <8 x float>, ptr %a1
+  %v2 = fmul <8 x float> %v0, %v1
+  store <8 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fmul_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfmul.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %v1 = load <4 x double>, ptr %a1
+  %v2 = fmul <4 x double> %v0, %v1
+  store <4 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fsub.ll
new file mode 100644
index 000000000000000..6164aa5a55c7e40
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fsub.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @fsub_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfsub.s $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x float>, ptr %a0
+  %v1 = load <8 x float>, ptr %a1
+  %v2 = fsub <8 x float> %v0, %v1
+  store <8 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fsub_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvfsub.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x double>, ptr %a0
+  %v1 = load <4 x double>, ptr %a1
+  %v2 = fsub <4 x double> %v0, %v1
+  store <4 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/lshr.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/lshr.ll
new file mode 100644
index 000000000000000..24be69d8032a82a
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/lshr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @lshr_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsrl.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = lshr <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsrl.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = lshr <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsrl.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = lshr <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsrl.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = lshr <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v32i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v32i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = lshr <32 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v32i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v32i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 7
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = lshr <32 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v16i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = lshr <16 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v16i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 15
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = lshr <16 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = lshr <8 x i32> %v0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = lshr <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = lshr <4 x i64> %v0, <i64 1, i64 1, i64 1, i64 1>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 63
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = lshr <4 x i64> %v0, <i64 63, i64 63, i64 63, i64 63>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/mul.ll
new file mode 100644
index 000000000000000..dcb893caa2555a2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/mul.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @mul_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmul.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = mul <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v16i16(ptr %res, ptr %a0, ptr %a1)  nounwind {
+; CHECK-LABEL: mul_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmul.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = mul <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmul.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = mul <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvmul.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = mul <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @mul_square_v32i8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvmul.b $xr0, $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = mul <32 x i8> %v0, %v0
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v16i16(ptr %res, ptr %a0)  nounwind {
+; CHECK-LABEL: mul_square_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvmul.h $xr0, $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = mul <16 x i16> %v0, %v0
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v8i32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvmul.w $xr0, $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = mul <8 x i32> %v0, %v0
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v4i64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvmul.d $xr0, $xr0, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = mul <4 x i64> %v0, %v0
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v32i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v32i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.b $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = mul <32 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v16i16_8(ptr %res, ptr %a0)  nounwind {
+; CHECK-LABEL: mul_v16i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.h $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = mul <16 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v8i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v8i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.w $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = mul <8 x i32> %v0, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v4i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v4i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.d $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = mul <4 x i64> %v0, <i64 8, i64 8, i64 8, i64 8>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v32i8_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v32i8_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.b $xr1, 17
+; CHECK-NEXT:    xvmul.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = mul <32 x i8> %v0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v16i16_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v16i16_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.h $xr1, 17
+; CHECK-NEXT:    xvmul.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = mul <16 x i16> %v0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v8i32_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v8i32_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.w $xr1, 17
+; CHECK-NEXT:    xvmul.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = mul <8 x i32> %v0, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v4i64_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v4i64_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvrepli.d $xr1, 17
+; CHECK-NEXT:    xvmul.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = mul <4 x i64> %v0, <i64 17, i64 17, i64 17, i64 17>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
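
Two constant-multiply lowerings show up above: a power-of-two splat (8) is
strength-reduced to a shift (xvslli by 3), while a non-power-of-two splat (17)
is materialized with xvrepli (which replicates an immediate into every lane)
and then multiplied with xvmul. Per-lane model of the first case (illustrative
only):

  define i16 @mul_lane_8(i16 %x) {
    %r = shl i16 %x, 3    ; x * 8 == x << 3 -> xvslli.h $xr, $xr, 3
    ret i16 %r
  }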

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sdiv.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sdiv.ll
new file mode 100644
index 000000000000000..e3635a5f14a2bac
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sdiv.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @sdiv_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = sdiv <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = sdiv <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = sdiv <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = sdiv <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v32i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v32i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.b $xr1, $xr0, 7
+; CHECK-NEXT:    xvsrli.b $xr1, $xr1, 5
+; CHECK-NEXT:    xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsrai.b $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = sdiv <32 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v16i16_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v16i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.h $xr1, $xr0, 15
+; CHECK-NEXT:    xvsrli.h $xr1, $xr1, 13
+; CHECK-NEXT:    xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsrai.h $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = sdiv <16 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v8i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v8i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.w $xr1, $xr0, 31
+; CHECK-NEXT:    xvsrli.w $xr1, $xr1, 29
+; CHECK-NEXT:    xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsrai.w $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = sdiv <8 x i32> %v0, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v4i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v4i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrai.d $xr1, $xr0, 63
+; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 61
+; CHECK-NEXT:    xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT:    xvsrai.d $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = sdiv <4 x i64> %v0, <i64 8, i64 8, i64 8, i64 8>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
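
The sdiv-by-8 sequences above are the classic round-toward-zero division by a
power of two: an arithmetic shift alone would round toward negative infinity,
so a bias of divisor-1 (here 7) is first added to negative lanes. Per-lane
model of the i8 case, mirroring the four vector instructions (illustrative
only, not part of the patch):

  define i8 @sdiv_lane_8(i8 %x) {
    %sign = ashr i8 %x, 7      ; 0 or -1                -> xvsrai.b ..., 7
    %bias = lshr i8 %sign, 5   ; 0 or 7 (= 8 - 1)       -> xvsrli.b ..., 5
    %sum  = add i8 %x, %bias   ; bias negative lanes    -> xvadd.b
    %q    = ashr i8 %sum, 3    ; now rounds toward zero -> xvsrai.b ..., 3
    ret i8 %q
  }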

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shl.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shl.ll
new file mode 100644
index 000000000000000..8a02c7e3ac975a9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shl.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @shl_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsll.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = shl <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsll.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = shl <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsll.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = shl <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsll.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = shl <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v32i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v32i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.b $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = shl <32 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v32i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v32i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.b $xr0, $xr0, 7
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = shl <32 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v16i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v16i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.h $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = shl <16 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v16i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v16i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.h $xr0, $xr0, 15
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = shl <16 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v8i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v8i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.w $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = shl <8 x i32> %v0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v8i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v8i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.w $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = shl <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v4i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v4i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.d $xr0, $xr0, 1
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = shl <4 x i64> %v0, <i64 1, i64 1, i64 1, i64 1>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v4i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v4i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvslli.d $xr0, $xr0, 63
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = shl <4 x i64> %v0, <i64 63, i64 63, i64 63, i64 63>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sub.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sub.ll
new file mode 100644
index 000000000000000..bcfff16514770f3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/sub.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @sub_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsub.b $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = sub <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsub.h $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = sub <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsub.w $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = sub <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvsub.d $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = sub <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v32i8_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v32i8_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsubi.bu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = sub <32 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v16i16_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v16i16_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsubi.hu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = sub <16 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v8i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v8i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsubi.wu $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = sub <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v4i64_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v4i64_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsubi.du $xr0, $xr0, 31
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = sub <4 x i64> %v0, <i64 31, i64 31, i64 31, i64 31>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/udiv.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/udiv.ll
new file mode 100644
index 000000000000000..e78084c7186d338
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/udiv.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @udiv_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.bu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = load <32 x i8>, ptr %a1
+  %v2 = udiv <32 x i8> %v0, %v1
+  store <32 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.hu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = load <16 x i16>, ptr %a1
+  %v2 = udiv <16 x i16> %v0, %v1
+  store <16 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.wu $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = load <8 x i32>, ptr %a1
+  %v2 = udiv <8 x i32> %v0, %v1
+  store <8 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a2, 0
+; CHECK-NEXT:    xvld $xr1, $a1, 0
+; CHECK-NEXT:    xvdiv.du $xr0, $xr1, $xr0
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = load <4 x i64>, ptr %a1
+  %v2 = udiv <4 x i64> %v0, %v1
+  store <4 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v32i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v32i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.b $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <32 x i8>, ptr %a0
+  %v1 = udiv <32 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <32 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v16i16_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v16i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.h $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i16>, ptr %a0
+  %v1 = udiv <16 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <16 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v8i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v8i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.w $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i32>, ptr %a0
+  %v1 = udiv <8 x i32> %v0, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+  store <8 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v4i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v4i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvld $xr0, $a1, 0
+; CHECK-NEXT:    xvsrli.d $xr0, $xr0, 3
+; CHECK-NEXT:    xvst $xr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i64>, ptr %a0
+  %v1 = udiv <4 x i64> %v0, <i64 8, i64 8, i64 8, i64 8>
+  store <4 x i64> %v1, ptr %res
+  ret void
+}

