[llvm] 29a0f3e - [LoongArch] Add some binary IR instructions testcases for LSX (#73929)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 30 05:41:25 PST 2023


Author: leecheechen
Date: 2023-11-30T21:41:18+08:00
New Revision: 29a0f3ec2b47630ce229953fe7250e741b6c10b6

URL: https://github.com/llvm/llvm-project/commit/29a0f3ec2b47630ce229953fe7250e741b6c10b6
DIFF: https://github.com/llvm/llvm-project/commit/29a0f3ec2b47630ce229953fe7250e741b6c10b6.diff

LOG: [LoongArch] Add some binary IR instructions testcases for LSX (#73929)

The IR instructions include:
- Binary Operations: add fadd sub fsub mul fmul udiv sdiv fdiv
- Bitwise Binary Operations: shl lshr ashr

Added: 
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/mul.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sdiv.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shl.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sub.ll
    llvm/test/CodeGen/LoongArch/lsx/ir-instruction/udiv.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll
new file mode 100644
index 000000000000000..2a7c37c2ae346ef
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @add_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = add <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @add_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = add <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @add_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = add <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @add_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = add <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @add_v16i8_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v16i8_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.bu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = add <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @add_v8i16_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v8i16_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.hu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = add <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @add_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = add <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @add_v2i64_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v2i64_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.du $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = add <2 x i64> %v0, <i64 31, i64 31>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll
new file mode 100644
index 000000000000000..fbc570d77ba8038
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @ashr_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = ashr <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = ashr <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = ashr <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = ashr <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v16i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = ashr <16 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v16i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 7
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = ashr <16 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = ashr <8 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 15
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = ashr <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = ashr <4 x i32> %v0, <i32 1, i32 1, i32 1, i32 1>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = ashr <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v2i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = ashr <2 x i64> %v0, <i64 1, i64 1>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v2i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 63
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = ashr <2 x i64> %v0, <i64 63, i64 63>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll
new file mode 100644
index 000000000000000..1fa1f611c4a36c7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fadd_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfadd.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fadd <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fadd_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfadd.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fadd <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll
new file mode 100644
index 000000000000000..eb7c8bd9616ec72
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fdiv_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfdiv.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fdiv <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fdiv_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfdiv.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fdiv <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll
new file mode 100644
index 000000000000000..e7fb527f7805e81
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fmul_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfmul.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fmul <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fmul_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfmul.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fmul <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll
new file mode 100644
index 000000000000000..df98182321dab9b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fsub_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfsub.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fsub <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fsub_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfsub.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fsub <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll
new file mode 100644
index 000000000000000..dada52f93060e10
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @lshr_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = lshr <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = lshr <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = lshr <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = lshr <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v16i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = lshr <16 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v16i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 7
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = lshr <16 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = lshr <8 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 15
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = lshr <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = lshr <4 x i32> %v0, <i32 1, i32 1, i32 1, i32 1>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = lshr <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v2i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = lshr <2 x i64> %v0, <i64 1, i64 1>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v2i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.d $vr0, $vr0, 63
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = lshr <2 x i64> %v0, <i64 63, i64 63>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/mul.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/mul.ll
new file mode 100644
index 000000000000000..5060240cd8b1320
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/mul.ll
@@ -0,0 +1,242 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @mul_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = mul <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v8i16(ptr %res, ptr %a0, ptr %a1)  nounwind {
+; CHECK-LABEL: mul_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = mul <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = mul <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @mul_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: mul_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = mul <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @mul_square_v16i8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vmul.b $vr0, $vr0, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = mul <16 x i8> %v0, %v0
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v8i16(ptr %res, ptr %a0)  nounwind {
+; CHECK-LABEL: mul_square_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vmul.h $vr0, $vr0, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = mul <8 x i16> %v0, %v0
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v4i32(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vmul.w $vr0, $vr0, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = mul <4 x i32> %v0, %v0
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_square_v2i64(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_square_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vmul.d $vr0, $vr0, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = mul <2 x i64> %v0, %v0
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v16i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v16i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.b $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = mul <16 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v8i16_8(ptr %res, ptr %a0)  nounwind {
+; CHECK-LABEL: mul_v8i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.h $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = mul <8 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v4i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v4i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.w $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = mul <4 x i32> %v0, <i32 8, i32 8, i32 8, i32 8>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v2i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v2i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.d $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = mul <2 x i64> %v0, <i64 8, i64 8>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v16i8_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v16i8_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a2, $zero, 17
+; CHECK-NEXT:    vreplgr2vr.b $vr0, $a2
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = mul <16 x i8> %v0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v8i16_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v8i16_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a2, $zero, 17
+; CHECK-NEXT:    vreplgr2vr.h $vr0, $a2
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = mul <8 x i16> %v0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v4i32_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v4i32_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a2, $zero, 17
+; CHECK-NEXT:    vreplgr2vr.w $vr0, $a2
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = mul <4 x i32> %v0, <i32 17, i32 17, i32 17, i32 17>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @mul_v2i64_17(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: mul_v2i64_17:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    ori $a2, $zero, 17
+; CHECK-NEXT:    vreplgr2vr.d $vr0, $a2
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vmul.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = mul <2 x i64> %v0, <i64 17, i64 17>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sdiv.ll
new file mode 100644
index 000000000000000..b68f73a749135d6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sdiv.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @sdiv_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = sdiv <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = sdiv <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = sdiv <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sdiv_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = sdiv <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @sdiv_v16i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v16i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.b $vr1, $vr0, 7
+; CHECK-NEXT:    vsrli.b $vr1, $vr1, 5
+; CHECK-NEXT:    vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = sdiv <16 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v8i16_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v8i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.h $vr1, $vr0, 15
+; CHECK-NEXT:    vsrli.h $vr1, $vr1, 13
+; CHECK-NEXT:    vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = sdiv <8 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v4i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v4i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.w $vr1, $vr0, 31
+; CHECK-NEXT:    vsrli.w $vr1, $vr1, 29
+; CHECK-NEXT:    vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = sdiv <4 x i32> %v0, <i32 8, i32 8, i32 8, i32 8>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @sdiv_v2i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sdiv_v2i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.d $vr1, $vr0, 63
+; CHECK-NEXT:    vsrli.d $vr1, $vr1, 61
+; CHECK-NEXT:    vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = sdiv <2 x i64> %v0, <i64 8, i64 8>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shl.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shl.ll
new file mode 100644
index 000000000000000..fa0aebaf28b3c5e
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shl.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @shl_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsll.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = shl <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsll.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = shl <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsll.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = shl <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: shl_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsll.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = shl <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @shl_v16i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v16i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.b $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = shl <16 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v16i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v16i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.b $vr0, $vr0, 7
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = shl <16 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v8i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v8i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.h $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = shl <8 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v8i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v8i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.h $vr0, $vr0, 15
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = shl <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v4i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v4i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.w $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = shl <4 x i32> %v0, <i32 1, i32 1, i32 1, i32 1>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.w $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = shl <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v2i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v2i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.d $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = shl <2 x i64> %v0, <i64 1, i64 1>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @shl_v2i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: shl_v2i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vslli.d $vr0, $vr0, 63
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = shl <2 x i64> %v0, <i64 63, i64 63>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sub.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sub.ll
new file mode 100644
index 000000000000000..25b4623a47d1fc1
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sub.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @sub_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsub.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = sub <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsub.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = sub <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsub.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = sub <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: sub_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsub.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = sub <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @sub_v16i8_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v16i8_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsubi.bu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = sub <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v8i16_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v8i16_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsubi.hu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = sub <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsubi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = sub <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @sub_v2i64_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: sub_v2i64_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsubi.du $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = sub <2 x i64> %v0, <i64 31, i64 31>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}

diff  --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/udiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/udiv.ll
new file mode 100644
index 000000000000000..abb60b91dd488f5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/udiv.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @udiv_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.bu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = udiv <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.hu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = udiv <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.wu $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = udiv <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: udiv_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vdiv.du $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = udiv <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @udiv_v16i8_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v16i8_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = udiv <16 x i8> %v0, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v8i16_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v8i16_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = udiv <8 x i16> %v0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v4i32_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v4i32_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = udiv <4 x i32> %v0, <i32 8, i32 8, i32 8, i32 8>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @udiv_v2i64_8(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: udiv_v2i64_8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.d $vr0, $vr0, 3
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = udiv <2 x i64> %v0, <i64 8, i64 8>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}


        


More information about the llvm-commits mailing list