[llvm] [LoongArch] Add some binary IR instructions testcases for LSX (PR #73929)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 30 04:30:46 PST 2023


llvmbot wrote:


@llvm/pr-subscribers-backend-loongarch

Author: None (leecheechen)

<details>
<summary>Changes</summary>

The new tests cover the following IR instructions (a representative case is excerpted after the list):
- Binary Operations: add, fadd, sub, fsub, mul, fmul, udiv, sdiv, fdiv
- Bitwise Binary Operations: shl, lshr, ashr
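
Each test follows the same shape: load the operand vector(s), apply the IR operation, store the result, and check the expected LSX instruction via assertions autogenerated by update_llc_test_checks.py. The excerpt below is the `add_v16i8` case taken verbatim from add.ll in the diff; only the descriptive comment above the function is added here.

```llvm
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

; Load two <16 x i8> vectors, add them, and store the result; the vector
; add is expected to lower to the LSX vadd.b instruction.
define void @add_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: add_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a2, 0
; CHECK-NEXT:    vld $vr1, $a1, 0
; CHECK-NEXT:    vadd.b $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %v2 = add <16 x i8> %v0, %v1
  store <16 x i8> %v2, ptr %res
  ret void
}
```

The splat-constant variants (e.g. `add_v16i8_31`, `ashr_v2i64_63`) additionally check that constant operands select the immediate-form instructions such as `vaddi.bu` and `vsrai.d`.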

---

Patch is 45.14 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/73929.diff


12 Files Affected:

- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll (+122) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll (+178) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll (+34) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll (+34) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll (+34) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll (+34) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll (+178) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/mul.ll (+242) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sdiv.ll (+134) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shl.ll (+178) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/sub.ll (+122) 
- (added) llvm/test/CodeGen/LoongArch/lsx/ir-instruction/udiv.ll (+122) 


``````````diff
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll
new file mode 100644
index 000000000000000..2a7c37c2ae346ef
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/add.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @add_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = add <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @add_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = add <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @add_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = add <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @add_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: add_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vadd.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = add <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @add_v16i8_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v16i8_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.bu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = add <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @add_v8i16_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v8i16_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.hu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = add <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @add_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.wu $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = add <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @add_v2i64_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: add_v2i64_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vaddi.du $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = add <2 x i64> %v0, <i64 31, i64 31>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll
new file mode 100644
index 000000000000000..fbc570d77ba8038
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/ashr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @ashr_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = ashr <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = ashr <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = ashr <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: ashr_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsra.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = ashr <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @ashr_v16i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = ashr <16 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v16i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v16i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.b $vr0, $vr0, 7
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = ashr <16 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = ashr <8 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v8i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v8i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.h $vr0, $vr0, 15
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = ashr <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = ashr <4 x i32> %v0, <i32 1, i32 1, i32 1, i32 1>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.w $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = ashr <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v2i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = ashr <2 x i64> %v0, <i64 1, i64 1>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @ashr_v2i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: ashr_v2i64_63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrai.d $vr0, $vr0, 63
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = ashr <2 x i64> %v0, <i64 63, i64 63>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll
new file mode 100644
index 000000000000000..1fa1f611c4a36c7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fadd.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fadd_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfadd.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fadd <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fadd_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fadd_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfadd.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fadd <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll
new file mode 100644
index 000000000000000..eb7c8bd9616ec72
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fdiv_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfdiv.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fdiv <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fdiv_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fdiv_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfdiv.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fdiv <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll
new file mode 100644
index 000000000000000..e7fb527f7805e81
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fmul.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fmul_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfmul.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fmul <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fmul_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fmul_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfmul.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fmul <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll
new file mode 100644
index 000000000000000..df98182321dab9b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fsub.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @fsub_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfsub.s $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x float>, ptr %a0
+  %v1 = load <4 x float>, ptr %a1
+  %v2 = fsub <4 x float> %v0, %v1
+  store <4 x float> %v2, ptr %res
+  ret void
+}
+
+define void @fsub_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: fsub_v2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vfsub.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x double>, ptr %a0
+  %v1 = load <2 x double>, ptr %a1
+  %v2 = fsub <2 x double> %v0, %v1
+  store <2 x double> %v2, ptr %res
+  ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll
new file mode 100644
index 000000000000000..dada52f93060e10
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/lshr.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @lshr_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.b $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = load <16 x i8>, ptr %a1
+  %v2 = lshr <16 x i8> %v0, %v1
+  store <16 x i8> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.h $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = load <8 x i16>, ptr %a1
+  %v2 = lshr <8 x i16> %v0, %v1
+  store <8 x i16> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.w $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = load <4 x i32>, ptr %a1
+  %v2 = lshr <4 x i32> %v0, %v1
+  store <4 x i32> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
+; CHECK-LABEL: lshr_v2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a2, 0
+; CHECK-NEXT:    vld $vr1, $a1, 0
+; CHECK-NEXT:    vsrl.d $vr0, $vr1, $vr0
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = load <2 x i64>, ptr %a1
+  %v2 = lshr <2 x i64> %v0, %v1
+  store <2 x i64> %v2, ptr %res
+  ret void
+}
+
+define void @lshr_v16i8_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i8_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = lshr <16 x i8> %v0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v16i8_7(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v16i8_7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.b $vr0, $vr0, 7
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <16 x i8>, ptr %a0
+  %v1 = lshr <16 x i8> %v0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  store <16 x i8> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i16_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = lshr <8 x i16> %v0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v8i16_15(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v8i16_15:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.h $vr0, $vr0, 15
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <8 x i16>, ptr %a0
+  %v1 = lshr <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  store <8 x i16> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i32_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = lshr <4 x i32> %v0, <i32 1, i32 1, i32 1, i32 1>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v4i32_31(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v4i32_31:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.w $vr0, $vr0, 31
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <4 x i32>, ptr %a0
+  %v1 = lshr <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
+  store <4 x i32> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64_1(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v2i64_1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vld $vr0, $a1, 0
+; CHECK-NEXT:    vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT:    vst $vr0, $a0, 0
+; CHECK-NEXT:    ret
+entry:
+  %v0 = load <2 x i64>, ptr %a0
+  %v1 = lshr <2 x i64> %v0, <i64 1, i64 1>
+  store <2 x i64> %v1, ptr %res
+  ret void
+}
+
+define void @lshr_v2i64_63(ptr %res, ptr %a0) nounwind {
+; CHECK-LABEL: lshr_v2i64_63:
+; CHECK:       # %bb...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/73929

