[llvm] [LoongArch][NFC] Pre-commit tests for 256-bit vector trunc (PR #170091)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 1 01:51:11 PST 2025


https://github.com/zhaoqi5 created https://github.com/llvm/llvm-project/pull/170091

None

From 267dc9d6420df1ba4899dc135cf138822775631e Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Mon, 1 Dec 2025 17:37:22 +0800
Subject: [PATCH] [LoongArch][NFC] Pre-commit tests for 256-bit vector trunc

---
 llvm/test/CodeGen/LoongArch/lasx/vec-trunc.ll | 302 ++++++++++++++++++
 1 file changed, 302 insertions(+)
 create mode 100644 llvm/test/CodeGen/LoongArch/lasx/vec-trunc.ll

diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-trunc.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-trunc.ll
new file mode 100644
index 0000000000000..3802b9df6043d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/vec-trunc.ll
@@ -0,0 +1,302 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefix=LA64
+
+define void @trunc_v4i64_to_v4i32(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v4i64_to_v4i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.w $vr1, $a1, 3
+; LA32-NEXT:    vst $vr1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v4i64_to_v4i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.w $vr1, $a1, 3
+; LA64-NEXT:    vst $vr1, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <4 x i64>, ptr %a
+  %vtrunc = trunc <4 x i64> %v to <4 x i32>
+  store <4 x i32> %vtrunc, ptr %res
+  ret void
+}
+
+define void @trunc_v4i64_to_v4i16(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v4i64_to_v4i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 3
+; LA32-NEXT:    vpickve2gr.w $a1, $vr1, 1
+; LA32-NEXT:    st.w $a1, $a0, 4
+; LA32-NEXT:    vpickve2gr.w $a1, $vr1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v4i64_to_v4i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 3
+; LA64-NEXT:    vstelm.d $vr1, $a0, 0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <4 x i64>, ptr %a
+  %vtrunc = trunc <4 x i64> %v to <4 x i16>
+  store <4 x i16> %vtrunc, ptr %res
+  ret void
+}
+
+define void @trunc_v4i64_to_v4i8(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v4i64_to_v4i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA32-NEXT:    vpickve2gr.w $a1, $vr1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v4i64_to_v4i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.d $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA64-NEXT:    vstelm.w $vr1, $a0, 0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <4 x i64>, ptr %a
+  %vtrunc = trunc <4 x i64> %v to <4 x i8>
+  store <4 x i8> %vtrunc, ptr %res
+  ret void
+}
+
+define void @trunc_v8i32_to_v8i16(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v8i32_to_v8i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 4
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 5
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 6
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA32-NEXT:    vinsgr2vr.h $vr1, $a1, 7
+; LA32-NEXT:    vst $vr1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v8i32_to_v8i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 3
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 4
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 5
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 6
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA64-NEXT:    vinsgr2vr.h $vr1, $a1, 7
+; LA64-NEXT:    vst $vr1, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <8 x i32>, ptr %a
+  %vtrunc = trunc <8 x i32> %v to <8 x i16>
+  store <8 x i16> %vtrunc, ptr %res
+  ret void
+}
+
+define void @trunc_v8i32_to_v8i8(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v8i32_to_v8i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 4
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 5
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 6
+; LA32-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 7
+; LA32-NEXT:    vpickve2gr.w $a1, $vr1, 1
+; LA32-NEXT:    st.w $a1, $a0, 4
+; LA32-NEXT:    vpickve2gr.w $a1, $vr1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v8i32_to_v8i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 0
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 1
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 2
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 3
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 4
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 4
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 5
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 5
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 6
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 6
+; LA64-NEXT:    xvpickve2gr.w $a1, $xr0, 7
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 7
+; LA64-NEXT:    vstelm.d $vr1, $a0, 0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <8 x i32>, ptr %a
+  %vtrunc = trunc <8 x i32> %v to <8 x i8>
+  store <8 x i8> %vtrunc, ptr %res
+  ret void
+}
+
+define void @trunc_v16i16_to_v16i8(ptr %res, ptr %a) nounwind {
+; LA32-LABEL: trunc_v16i16_to_v16i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xvld $xr0, $a1, 0
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 0
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 1
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 2
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 3
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 4
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 4
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 5
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 5
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 6
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 6
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 7
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 7
+; LA32-NEXT:    xvpermi.d $xr0, $xr0, 14
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 0
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 8
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 1
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 9
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 2
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 10
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 3
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 11
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 4
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 12
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 5
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 13
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 6
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 14
+; LA32-NEXT:    vpickve2gr.h $a1, $vr0, 7
+; LA32-NEXT:    vinsgr2vr.b $vr1, $a1, 15
+; LA32-NEXT:    vst $vr1, $a0, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: trunc_v16i16_to_v16i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xvld $xr0, $a1, 0
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 0
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 0
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 1
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 1
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 2
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 2
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 3
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 3
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 4
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 4
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 5
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 5
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 6
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 6
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 7
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 7
+; LA64-NEXT:    xvpermi.d $xr0, $xr0, 14
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 0
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 8
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 1
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 9
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 2
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 10
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 3
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 11
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 4
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 12
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 5
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 13
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 6
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 14
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 7
+; LA64-NEXT:    vinsgr2vr.b $vr1, $a1, 15
+; LA64-NEXT:    vst $vr1, $a0, 0
+; LA64-NEXT:    ret
+entry:
+  %v = load <16 x i16>, ptr %a
+  %vtrunc = trunc <16 x i16> %v to <16 x i8>
+  store <16 x i8> %vtrunc, ptr %res
+  ret void
+}


