[llvm] [LoongArch] Pre-commit test for loop term fold pass & vector sext, zext (PR #131742)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 17 23:52:14 PDT 2025
https://github.com/tangaac created https://github.com/llvm/llvm-project/pull/131742
None
>From 9b0f3d24aba693dc525ab6084e0684b35a7fca9e Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 18 Mar 2025 14:23:00 +0800
Subject: [PATCH] Pre-commit test for loop term fold pass & vector sext, zext
---
llvm/test/CodeGen/LoongArch/loop-term-fold.ll | 113 +++++++
.../LoongArch/lsx/vec-shuffle-any-ext.ll | 246 ++++++++++++++++
.../LoongArch/lsx/vec-shuffle-sign-ext.ll | 276 ++++++++++++++++++
3 files changed, 635 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/loop-term-fold.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-sign-ext.ll
diff --git a/llvm/test/CodeGen/LoongArch/loop-term-fold.ll b/llvm/test/CodeGen/LoongArch/loop-term-fold.ll
new file mode 100644
index 0000000000000..a7f4074320483
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/loop-term-fold.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s
+
+;; Whether or not the Loop Term Fold pass is enabled, this test contains only 2 add instructions.
+define void @const_tripcount_le_1024(ptr %a, ptr %b) {
+; CHECK-LABEL: const_tripcount_le_1024:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: move $a2, $zero
+; CHECK-NEXT: ori $a3, $zero, 2048
+; CHECK-NEXT: .p2align 4, , 16
+; CHECK-NEXT: .LBB0_1: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldx.h $a4, $a1, $a2
+; CHECK-NEXT: st.w $a4, $a0, 0
+; CHECK-NEXT: addi.d $a2, $a2, 2
+; CHECK-NEXT: addi.d $a0, $a0, 4
+; CHECK-NEXT: bne $a2, $a3, .LBB0_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw i16, ptr %b, i64 %indvars.iv
+ %0 = load i16, ptr %arrayidx
+ %conv = sext i16 %0 to i32
+ %arrayidx2 = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
+ store i32 %conv, ptr %arrayidx2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+
+;; After the Loop Term Fold pass is enabled, this test contains only 2 add instructions.
+define void @const_tripcount_gt_1024(ptr %a, ptr %b) {
+; CHECK-LABEL: const_tripcount_gt_1024:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lu12i.w $a2, -2
+; CHECK-NEXT: ori $a2, $a2, 4092
+; CHECK-NEXT: .p2align 4, , 16
+; CHECK-NEXT: .LBB1_1: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ld.h $a3, $a1, 0
+; CHECK-NEXT: add.d $a4, $a0, $a2
+; CHECK-NEXT: stptr.w $a3, $a4, 4100
+; CHECK-NEXT: addi.d $a2, $a2, 4
+; CHECK-NEXT: addi.d $a1, $a1, 2
+; CHECK-NEXT: bnez $a2, .LBB1_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw i16, ptr %b, i64 %indvars.iv
+ %0 = load i16, ptr %arrayidx
+ %conv = sext i16 %0 to i32
+ %arrayidx2 = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
+ store i32 %conv, ptr %arrayidx2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, 1025
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+;; After the Loop Term Fold pass is enabled, this test contains only 2 add instructions.
+define void @runtime_tripcount(ptr %a, ptr %b, i32 %n) {
+; CHECK-LABEL: runtime_tripcount:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi.w $a2, $a2, 0
+; CHECK-NEXT: ori $a3, $zero, 1
+; CHECK-NEXT: blt $a2, $a3, .LBB2_2
+; CHECK-NEXT: .p2align 4, , 16
+; CHECK-NEXT: .LBB2_1: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ld.h $a3, $a1, 0
+; CHECK-NEXT: st.w $a3, $a0, 0
+; CHECK-NEXT: addi.d $a2, $a2, -1
+; CHECK-NEXT: addi.d $a0, $a0, 4
+; CHECK-NEXT: addi.d $a1, $a1, 2
+; CHECK-NEXT: bnez $a2, .LBB2_1
+; CHECK-NEXT: .LBB2_2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %cmp6 = icmp sgt i32 %n, 0
+ br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %wide.trip.count = zext nneg i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup:
+ ret void
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds nuw i16, ptr %b, i64 %indvars.iv
+ %0 = load i16, ptr %arrayidx
+ %conv = sext i16 %0 to i32
+ %arrayidx2 = getelementptr inbounds nuw i32, ptr %a, i64 %indvars.iv
+ store i32 %conv, ptr %arrayidx2
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll
new file mode 100644
index 0000000000000..e5694cdf9fea9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll
@@ -0,0 +1,246 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+d,+lsx < %s | FileCheck %s
+
+define void @shuffle_any_ext_2i8_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_2i8_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.h $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI0_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI0_0)
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a0, 0
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr1, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i8>, ptr %ptr
+ %y = shufflevector <2 x i8> %x, <2 x i8> poison, <16 x i32> <i32 0, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 1, i32 2, i32 2, i32 2,i32 2, i32 2, i32 2, i32 2>
+ %r = bitcast <16 x i8> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_2i16_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_2i16_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI1_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI1_0)
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a0, 0
+; CHECK-NEXT: vshuf.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i16>, ptr %ptr
+ %y = shufflevector <2 x i16> %x, <2 x i16> poison, <8 x i32> <i32 0, i32 3, i32 3, i32 3, i32 1, i32 2, i32 2, i32 2>
+ %r = bitcast <8 x i16> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_2i32_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_2i32_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 16
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i32>, ptr %ptr
+ %y = shufflevector <2 x i32> %x, <2 x i32> poison, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %r = bitcast <4 x i32> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_4i8_to_4i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_4i8_to_4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a0, 0
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr1, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i8>, ptr %ptr
+ %y = shufflevector <4 x i8> %x, <4 x i8> poison, <16 x i32> <i32 0, i32 7, i32 7, i32 7, i32 1, i32 6, i32 6, i32 6, i32 2, i32 5, i32 5, i32 5, i32 3, i32 4, i32 4, i32 4>
+ %r = bitcast <16 x i8> %y to <4 x i32>
+ store <4 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_4i16_to_4i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_4i16_to_4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT: vilvl.h $vr0, $vr0, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i16>, ptr %ptr
+ %y = shufflevector <4 x i16> %x, <4 x i16> poison, <8 x i32> <i32 0, i32 7, i32 1, i32 6, i32 2, i32 5, i32 3, i32 4>
+ %r = bitcast <8 x i16> %y to <4 x i32>
+ store <4 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_8i8_to_8i16(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_8i8_to_8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT: vilvl.b $vr0, $vr0, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i8>, ptr %ptr
+ %y = shufflevector <8 x i8> %x, <8 x i8> poison, <16 x i32> <i32 0, i32 15, i32 1, i32 14, i32 2, i32 13, i32 3, i32 12, i32 4, i32 11, i32 5, i32 10, i32 6, i32 9, i32 7, i32 8>
+ %r = bitcast <16 x i8> %y to <8 x i16>
+ store <8 x i16> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_4i32_to_4i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_4i32_to_4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: vshuf4i.w $vr1, $vr0, 16
+; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 50
+; CHECK-NEXT: vst $vr0, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i32>, ptr %ptr
+ %y = shufflevector <4 x i32> %x, <4 x i32> poison, <8 x i32> <i32 0, i32 7, i32 1, i32 6, i32 2, i32 5, i32 3, i32 4>
+ %r = bitcast <8 x i32> %y to <4 x i64>
+ store <4 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_8i16_to_8i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_8i16_to_8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: vilvl.h $vr1, $vr0, $vr0
+; CHECK-NEXT: vilvh.h $vr0, $vr0, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i16>, ptr %ptr
+ %y = shufflevector <8 x i16> %x, <8 x i16> poison, <16 x i32> <i32 0, i32 15, i32 1, i32 15, i32 2, i32 13, i32 3, i32 12, i32 4, i32 11, i32 5, i32 10, i32 6, i32 9, i32 7, i32 8>
+ %r = bitcast <16 x i16> %y to <8 x i32>
+ store <8 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_8i16_to_8i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_8i16_to_8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI8_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_2)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI8_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_3)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI8_3)
+; CHECK-NEXT: vshuf.h $vr1, $vr0, $vr0
+; CHECK-NEXT: vshuf.h $vr2, $vr0, $vr0
+; CHECK-NEXT: vshuf.h $vr3, $vr0, $vr0
+; CHECK-NEXT: vshuf.h $vr4, $vr0, $vr0
+; CHECK-NEXT: vst $vr4, $a1, 48
+; CHECK-NEXT: vst $vr3, $a1, 32
+; CHECK-NEXT: vst $vr2, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i16>, ptr %ptr
+ %y = shufflevector <8 x i16> %x, <8 x i16> poison, <32 x i32> <i32 0, i32 15, i32 15, i32 15, i32 1, i32 14, i32 14, i32 14, i32 2, i32 13, i32 13, i32 13, i32 3, i32 12, i32 12, i32 12, i32 4, i32 11, i32 11, i32 11, i32 5, i32 10, i32 10, i32 10, i32 6, i32 9, i32 9, i32 9, i32 7, i32 8, i32 8, i32 8>
+ %r = bitcast <32 x i16> %y to <8 x i64>
+ store <8 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_16i8_to_16i16(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_16i8_to_16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: vilvl.b $vr1, $vr0, $vr0
+; CHECK-NEXT: vilvh.b $vr0, $vr0, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> poison, <32 x i32> <i32 0, i32 31, i32 1, i32 31, i32 2, i32 29, i32 3, i32 28, i32 4, i32 27, i32 5, i32 26, i32 6, i32 25, i32 7, i32 24, i32 8, i32 23, i32 9, i32 22, i32 10, i32 21, i32 11, i32 20, i32 12, i32 19, i32 13, i32 18, i32 14, i32 17, i32 15, i32 16 >
+ %r = bitcast <32 x i8> %y to <16 x i16>
+ store <16 x i16> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_16i8_to_16i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_16i8_to_16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI10_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_2)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI10_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_3)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI10_3)
+; CHECK-NEXT: vshuf.b $vr1, $vr0, $vr0, $vr1
+; CHECK-NEXT: vshuf.b $vr2, $vr0, $vr0, $vr2
+; CHECK-NEXT: vshuf.b $vr3, $vr0, $vr0, $vr3
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr4
+; CHECK-NEXT: vst $vr0, $a1, 48
+; CHECK-NEXT: vst $vr3, $a1, 32
+; CHECK-NEXT: vst $vr2, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> poison, <64 x i32> <i32 0, i32 31, i32 31, i32 31, i32 1, i32 30, i32 30, i32 30, i32 2, i32 29, i32 29, i32 29, i32 3, i32 28, i32 28, i32 28, i32 4, i32 27, i32 27, i32 27, i32 5, i32 26, i32 26, i32 26, i32 6, i32 25, i32 25, i32 25, i32 7, i32 24, i32 24, i32 24, i32 8, i32 23, i32 23, i32 23, i32 9, i32 22, i32 22, i32 22, i32 10, i32 21, i32 21, i32 21, i32 11, i32 20, i32 20, i32 20, i32 12, i32 19, i32 19, i32 19, i32 13, i32 18, i32 18, i32 18, i32 14, i32 17, i32 17, i32 17, i32 15, i32 16, i32 16, i32 16>
+ %r = bitcast <64 x i8> %y to <16 x i32>
+ store <16 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_any_ext_16i8_to_16i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_any_ext_16i8_to_16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI11_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_2)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI11_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_3)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI11_3)
+; CHECK-NEXT: vshuf.b $vr1, $vr0, $vr0, $vr1
+; CHECK-NEXT: vshuf.b $vr2, $vr0, $vr0, $vr2
+; CHECK-NEXT: vshuf.b $vr3, $vr0, $vr0, $vr3
+; CHECK-NEXT: vshuf.b $vr4, $vr0, $vr0, $vr4
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_4)
+; CHECK-NEXT: vld $vr5, $a0, %pc_lo12(.LCPI11_4)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_5)
+; CHECK-NEXT: vld $vr6, $a0, %pc_lo12(.LCPI11_5)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_6)
+; CHECK-NEXT: vld $vr7, $a0, %pc_lo12(.LCPI11_6)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_7)
+; CHECK-NEXT: vld $vr8, $a0, %pc_lo12(.LCPI11_7)
+; CHECK-NEXT: vshuf.b $vr5, $vr0, $vr0, $vr5
+; CHECK-NEXT: vshuf.b $vr6, $vr0, $vr0, $vr6
+; CHECK-NEXT: vshuf.b $vr7, $vr0, $vr0, $vr7
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr8
+; CHECK-NEXT: vst $vr0, $a1, 112
+; CHECK-NEXT: vst $vr7, $a1, 96
+; CHECK-NEXT: vst $vr6, $a1, 80
+; CHECK-NEXT: vst $vr5, $a1, 64
+; CHECK-NEXT: vst $vr4, $a1, 48
+; CHECK-NEXT: vst $vr3, $a1, 32
+; CHECK-NEXT: vst $vr2, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> poison, <128 x i32> <i32 0, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 1, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 2, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 3, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 4, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 5, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 6, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 7, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 8, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 9, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 10, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 11, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 12, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 13, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 14, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 15, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %r = bitcast <128 x i8> %y to <16 x i64>
+ store <16 x i64> %r, ptr %dst
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-sign-ext.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-sign-ext.ll
new file mode 100644
index 0000000000000..6a903fe17e875
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-sign-ext.ll
@@ -0,0 +1,276 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+d,+lsx < %s | FileCheck %s
+
+define void @shuffle_sign_ext_2i8_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_2i8_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.h $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI0_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI0_0)
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.b $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i8>, ptr %ptr
+ %y = shufflevector <2 x i8> %x, <2 x i8> zeroinitializer, <16 x i32> <i32 0, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 1, i32 2, i32 2, i32 2,i32 2, i32 2, i32 2, i32 2>
+ %r = bitcast <16 x i8> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_2i16_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_2i16_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI1_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI1_0)
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.h $vr0, $vr1, $vr2
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i16>, ptr %ptr
+ %y = shufflevector <2 x i16> %x, <2 x i16> zeroinitializer, <8 x i32> <i32 0, i32 3, i32 3, i32 3, i32 1, i32 2, i32 2, i32 2>
+ %r = bitcast <8 x i16> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_2i32_to_2i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_2i32_to_2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI2_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI2_0)
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.w $vr0, $vr1, $vr2
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <2 x i32>, ptr %ptr
+ %y = shufflevector <2 x i32> %x, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
+ %r = bitcast <4 x i32> %y to <2 x i64>
+ store <2 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_4i8_to_4i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_4i8_to_4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI3_0)
+; CHECK-NEXT: vinsgr2vr.w $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.b $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i8>, ptr %ptr
+ %y = shufflevector <4 x i8> %x, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 7, i32 7, i32 7, i32 1, i32 6, i32 6, i32 6, i32 2, i32 5, i32 5, i32 5, i32 3, i32 4, i32 4, i32 4>
+ %r = bitcast <16 x i8> %y to <4 x i32>
+ store <4 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_4i16_to_4i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_4i16_to_4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI4_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI4_0)
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.h $vr0, $vr1, $vr2
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i16>, ptr %ptr
+ %y = shufflevector <4 x i16> %x, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 7, i32 1, i32 6, i32 2, i32 5, i32 3, i32 4>
+ %r = bitcast <8 x i16> %y to <4 x i32>
+ store <4 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_8i8_to_8i16(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_8i8_to_8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI5_0)
+; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI5_0)
+; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: vshuf.b $vr0, $vr1, $vr2, $vr0
+; CHECK-NEXT: vst $vr0, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i8>, ptr %ptr
+ %y = shufflevector <8 x i8> %x, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 15, i32 1, i32 14, i32 2, i32 13, i32 3, i32 12, i32 4, i32 11, i32 5, i32 10, i32 6, i32 9, i32 7, i32 8>
+ %r = bitcast <16 x i8> %y to <8 x i16>
+ store <8 x i16> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_4i32_to_4i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_4i32_to_4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI6_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI6_1)
+; CHECK-NEXT: vrepli.b $vr3, 0
+; CHECK-NEXT: vshuf.w $vr1, $vr0, $vr3
+; CHECK-NEXT: vshuf.w $vr2, $vr0, $vr3
+; CHECK-NEXT: vst $vr2, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <4 x i32>, ptr %ptr
+ %y = shufflevector <4 x i32> %x, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 7, i32 1, i32 6, i32 2, i32 5, i32 3, i32 4>
+ %r = bitcast <8 x i32> %y to <4 x i64>
+ store <4 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_8i16_to_8i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_8i16_to_8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI7_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI7_1)
+; CHECK-NEXT: vrepli.b $vr3, 0
+; CHECK-NEXT: vshuf.h $vr1, $vr0, $vr3
+; CHECK-NEXT: vshuf.h $vr2, $vr0, $vr3
+; CHECK-NEXT: vst $vr2, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i16>, ptr %ptr
+ %y = shufflevector <8 x i16> %x, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 15, i32 1, i32 14, i32 2, i32 13, i32 3, i32 12, i32 4, i32 11, i32 5, i32 10, i32 6, i32 9, i32 7, i32 8>
+ %r = bitcast <16 x i16> %y to <8 x i32>
+ store <8 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_8i16_to_8i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_8i16_to_8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI8_0)
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_1)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI8_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_2)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI8_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_3)
+; CHECK-NEXT: vld $vr5, $a0, %pc_lo12(.LCPI8_3)
+; CHECK-NEXT: vshuf.h $vr1, $vr0, $vr2
+; CHECK-NEXT: vshuf.h $vr3, $vr0, $vr2
+; CHECK-NEXT: vshuf.h $vr4, $vr0, $vr2
+; CHECK-NEXT: vshuf.h $vr5, $vr0, $vr2
+; CHECK-NEXT: vst $vr5, $a1, 48
+; CHECK-NEXT: vst $vr4, $a1, 32
+; CHECK-NEXT: vst $vr3, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <8 x i16>, ptr %ptr
+ %y = shufflevector <8 x i16> %x, <8 x i16> zeroinitializer, <32 x i32> <i32 0, i32 15, i32 15, i32 15, i32 1, i32 14, i32 14, i32 14, i32 2, i32 13, i32 13, i32 13, i32 3, i32 12, i32 12, i32 12, i32 4, i32 11, i32 11, i32 11, i32 5, i32 10, i32 10, i32 10, i32 6, i32 9, i32 9, i32 9, i32 7, i32 8, i32 8, i32 8>
+ %r = bitcast <32 x i16> %y to <8 x i64>
+ store <8 x i64> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_16i8_to_16i16(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_16i8_to_16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI9_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_1)
+; CHECK-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI9_1)
+; CHECK-NEXT: vrepli.b $vr3, 0
+; CHECK-NEXT: vshuf.b $vr1, $vr0, $vr3, $vr1
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr3, $vr2
+; CHECK-NEXT: vst $vr0, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <32 x i32> <i32 0, i32 31, i32 1, i32 30, i32 2, i32 29, i32 3, i32 28, i32 4, i32 27, i32 5, i32 26, i32 6, i32 25, i32 7, i32 24, i32 8, i32 23, i32 9, i32 22, i32 10, i32 21, i32 11, i32 20, i32 12, i32 19, i32 13, i32 18, i32 14, i32 17, i32 15, i32 16 >
+ %r = bitcast <32 x i8> %y to <16 x i16>
+ store <16 x i16> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_16i8_to_16i32(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_16i8_to_16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI10_0)
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_1)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI10_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_2)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI10_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_3)
+; CHECK-NEXT: vld $vr5, $a0, %pc_lo12(.LCPI10_3)
+; CHECK-NEXT: vshuf.b $vr1, $vr0, $vr2, $vr1
+; CHECK-NEXT: vshuf.b $vr3, $vr0, $vr2, $vr3
+; CHECK-NEXT: vshuf.b $vr4, $vr0, $vr2, $vr4
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr2, $vr5
+; CHECK-NEXT: vst $vr0, $a1, 48
+; CHECK-NEXT: vst $vr4, $a1, 32
+; CHECK-NEXT: vst $vr3, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <64 x i32> <i32 0, i32 31, i32 31, i32 31, i32 1, i32 30, i32 30, i32 30, i32 2, i32 29, i32 29, i32 29, i32 3, i32 28, i32 28, i32 28, i32 4, i32 27, i32 27, i32 27, i32 5, i32 26, i32 26, i32 26, i32 6, i32 25, i32 25, i32 25, i32 7, i32 24, i32 24, i32 24, i32 8, i32 23, i32 23, i32 23, i32 9, i32 22, i32 22, i32 22, i32 10, i32 21, i32 21, i32 21, i32 11, i32 20, i32 20, i32 20, i32 12, i32 19, i32 19, i32 19, i32 13, i32 18, i32 18, i32 18, i32 14, i32 17, i32 17, i32 17, i32 15, i32 16, i32 16, i32 16>
+ %r = bitcast <64 x i8> %y to <16 x i32>
+ store <16 x i32> %r, ptr %dst
+ ret void
+}
+
+define void @shuffle_sign_ext_16i8_to_16i64(ptr %ptr, ptr %dst) {
+; CHECK-LABEL: shuffle_sign_ext_16i8_to_16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vld $vr0, $a0, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0)
+; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI11_0)
+; CHECK-NEXT: vrepli.b $vr2, 0
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_1)
+; CHECK-NEXT: vld $vr3, $a0, %pc_lo12(.LCPI11_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_2)
+; CHECK-NEXT: vld $vr4, $a0, %pc_lo12(.LCPI11_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_3)
+; CHECK-NEXT: vld $vr5, $a0, %pc_lo12(.LCPI11_3)
+; CHECK-NEXT: vshuf.b $vr1, $vr0, $vr2, $vr1
+; CHECK-NEXT: vshuf.b $vr3, $vr0, $vr2, $vr3
+; CHECK-NEXT: vshuf.b $vr4, $vr0, $vr2, $vr4
+; CHECK-NEXT: vshuf.b $vr5, $vr0, $vr2, $vr5
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_4)
+; CHECK-NEXT: vld $vr6, $a0, %pc_lo12(.LCPI11_4)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_5)
+; CHECK-NEXT: vld $vr7, $a0, %pc_lo12(.LCPI11_5)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_6)
+; CHECK-NEXT: vld $vr8, $a0, %pc_lo12(.LCPI11_6)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_7)
+; CHECK-NEXT: vld $vr9, $a0, %pc_lo12(.LCPI11_7)
+; CHECK-NEXT: vshuf.b $vr6, $vr0, $vr2, $vr6
+; CHECK-NEXT: vshuf.b $vr7, $vr0, $vr2, $vr7
+; CHECK-NEXT: vshuf.b $vr8, $vr0, $vr2, $vr8
+; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr2, $vr9
+; CHECK-NEXT: vst $vr0, $a1, 112
+; CHECK-NEXT: vst $vr8, $a1, 96
+; CHECK-NEXT: vst $vr7, $a1, 80
+; CHECK-NEXT: vst $vr6, $a1, 64
+; CHECK-NEXT: vst $vr5, $a1, 48
+; CHECK-NEXT: vst $vr4, $a1, 32
+; CHECK-NEXT: vst $vr3, $a1, 16
+; CHECK-NEXT: vst $vr1, $a1, 0
+; CHECK-NEXT: ret
+ %x = load <16 x i8>, ptr %ptr
+ %y = shufflevector <16 x i8> %x, <16 x i8> zeroinitializer, <128 x i32> <i32 0, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 1, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 30, i32 2, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 29, i32 3, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 28, i32 4, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 27, i32 5, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 26, i32 6, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 7, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 8, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 23, i32 9, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 22, i32 10, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 21, i32 11, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 20, i32 12, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 13, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 18, i32 14, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 15, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %r = bitcast <128 x i8> %y to <16 x i64>
+ store <16 x i64> %r, ptr %dst
+ ret void
+}
More information about the llvm-commits
mailing list