[llvm] [RISCV] Lower disjoint shuffle of shuffles to MERGE-GATHER sequence (PR #178095)
Ryan Buchner via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 5 00:33:48 PST 2026
https://github.com/bababuck updated https://github.com/llvm/llvm-project/pull/178095
>From 21c1013950ca19bf44e5194ee611ea3a096a14c2 Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Mon, 26 Jan 2026 14:15:50 -0800
Subject: [PATCH 01/16] [RISCV] Add new test for shuffle merge lowerings
These are cases that can be lowered into a merge followed
by a gather.
---
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 208 ++++++++++++++++++
1 file changed, 208 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
new file mode 100644
index 0000000000000..6d9ebae73cc0f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -0,0 +1,208 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK
+
+; Can be optimized as a merge followed by a shuffle
+define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_disjoint:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI0_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
+
+; Can be optimized as a merge followed by a shuffle
+define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_disjoint_unordered:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_0)
+; CHECK-NEXT: lui a1, %hi(.LCPI1_1)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_1)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI1_2)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_2)
+; CHECK-NEXT: vle16.v v14, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a1, a1, 1
+; CHECK-NEXT: vrgather.vv v16, v10, v12
+; CHECK-NEXT: vslidedown.vx v10, v14, a1
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v11, v16, v10
+; CHECK-NEXT: vrgather.vv v10, v16, v14
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI1_3)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_3)
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, 6
+; CHECK-NEXT: addi a0, a0, 1830
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vrgather.vv v16, v8, v12
+; CHECK-NEXT: vslidedown.vx v8, v14, a1
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v16, v8
+; CHECK-NEXT: vrgather.vv v8, v16, v14
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 19, i32 23, i32 3, i32 4, i32 20, i32 6, i32 7, i32 16, i32 17, i32 18, i32 1, i32 5, i32 21, i32 22, i32 2>
+ ret <16 x i16> %merge
+}
+
+; Can be optimized since the lanes are disjoint, but a single lane is used multiple times by one of the vectors
+define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_duplicated_within_operand:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI2_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
+
+; Can't be optimized as merge-shuffle since the same lane (8) is used from both operands
+define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_duplicated_lane:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI3_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 8, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
+
+; Can't be optimized since shuff0 is used twice
+define <16 x i16> @shuffle_shuffle_multiple_uses(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_multiple_uses:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI4_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_1)
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: vrgather.vv v16, v8, v12
+; CHECK-NEXT: vrgather.vv v8, v10, v14
+; CHECK-NEXT: vmv.v.v v10, v16
+; CHECK-NEXT: vslideup.vi v10, v8, 8
+; CHECK-NEXT: vadd.vv v8, v16, v10
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ %add = add <16 x i16> %shuff0, %merge
+ ret <16 x i16> %add
+}
+
+; Can be optimized as a merge followed by a shuffle
+define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_unbalanced:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: lui a0, %hi(.LCPI5_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_1)
+; CHECK-NEXT: vrgather.vv v14, v8, v12
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vmv.v.v v8, v14
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vslideup.vi v8, v14, 3, v0.t
+; CHECK-NEXT: li a0, 112
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vrgather.vv v14, v10, v12
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vslideup.vi v8, v14, 4, v0.t
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 8, i32 9>
+ ret <16 x i16> %merge
+}
+
+; Can't be optimized since one of the elements used by the final shuffle is shuffled to a poison index
+define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_poison:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI6_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 poison, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
+
+; Can't be optimized since one of the elements used by the final shuffle is shuffled to a poison index
+define <16 x i16> @shuffle_shuffle_poison2(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: shuffle_shuffle_poison2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI7_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 poison, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
>From 7952ba9ec28d453d3aaef4af7120524da618c44c Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Mon, 26 Jan 2026 14:16:38 -0800
Subject: [PATCH 02/16] [RISCV] Lower disjoint shuffle of shuffles to
MERGE-GATHER sequence
A shuffle of shuffles where:
- the first two shuffles each only shuffle from a single operand
- no pair of elements in the result comes from the same lane of different input operands
such as:
%shuff0 = shufflevector <4 x i16> %op0, <4 x i16> poison, <4 x i32> <i32 0, i32 3, i32 poison, i32 poison>
%shuff1 = shufflevector <4 x i16> %op1, <4 x i16> poison, <4 x i32> <i32 1, i32 2, i32 poison, i32 poison>
%merge = shufflevector <4 x i16> %shuff0, <4 x i16> %shuff1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
can be lowered into (pseudo-IR):
%merge = VMERGE.VVM <4 x i16> %op0, <4 x i16> %op1, <4 x i1> <i1 1, i1 0, i1 0, i1 1>
%gather = VRGATHER.VV <4 x i16> %merge, <4 x i32> <i32 0, i32 3, i32 1, i32 2>
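For reference (not part of the patch), the same rewrite can be sketched in generic LLVM IR, assuming the disjointness check described above has passed: the merge step becomes a select on a constant i1 mask and the gather step becomes a single shufflevector of the merged value:
; Sketch only: lanes 0 and 3 are taken from %op0, lanes 1 and 2 from %op1,
; then one shuffle reorders the merged vector into the final layout.
%merge.sketch = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %op0, <4 x i16> %op1
%res.sketch = shufflevector <4 x i16> %merge.sketch, <4 x i16> poison, <4 x i32> <i32 0, i32 3, i32 1, i32 2>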
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 105 ++++++++++++++++++
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 84 +++++---------
2 files changed, 131 insertions(+), 58 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 436ce16784c75..a1474e32232e4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5439,6 +5439,107 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
+// A shuffle of shuffles where:
+// - both of the first-level shuffles only use data from a single input
+// - the final output doesn't use data from the same lane of both operands
+// This can be lowered to a MERGE followed by a GATHER
+static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
+ if (V1.getOpcode() != ISD::VECTOR_SHUFFLE ||
+ V2.getOpcode() != ISD::VECTOR_SHUFFLE)
+ return SDValue();
+
+ if (!V1.hasOneUse() || !V2.hasOneUse())
+ return SDValue();
+
+ // Can just be reduced into a single gather operation
+ if (V1.getOperand(0) == V2.getOperand(0))
+ return SDValue();
+
+ unsigned NumElts = VT.getVectorNumElements();
+ auto *SVN1 = cast<ShuffleVectorSDNode>(V1.getNode());
+ auto *SVN2 = cast<ShuffleVectorSDNode>(V2.getNode());
+ auto V1Mask = SVN1->getMask();
+ auto V2Mask = SVN2->getMask();
+ // 0: Not set, 1: Set by V1, 2: Set by V2
+ SmallVector<unsigned> ShuffleLaneUses(NumElts, 0);
+ for (unsigned Idx : seq<unsigned>(NumElts)) {
+ int Lane = Mask[Idx];
+ auto LanePoisonOrOOB = [](int Lane, unsigned NumElts) -> bool {
+ return Lane < 0 || Lane > (int)NumElts;
+ };
+ // Don't handle if the index is poison or out of bounds
+ if (LanePoisonOrOOB(Lane, 2 * NumElts))
+ return SDValue();
+ unsigned OpNum;
+ int OrigLane;
+ if ((unsigned)Lane < NumElts) {
+ OpNum = 1;
+ OrigLane = V1Mask[Lane];
+ } else {
+ OpNum = 2;
+ OrigLane = V2Mask[Lane - NumElts];
+ }
+ // Don't handle if the index is poison or if shuffling from a second
+ // operand
+ if (LanePoisonOrOOB(OrigLane, NumElts))
+ return SDValue();
+
+ const unsigned CurrLaneSrc = ShuffleLaneUses[OrigLane];
+ // Can't use the same lane from both operands in the merge
+ if (CurrLaneSrc != 0 && CurrLaneSrc != OpNum)
+ return SDValue();
+ ShuffleLaneUses[OrigLane] = OpNum;
+ }
+
+ MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+ auto [_, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
+ // Create the mask for the initial merge
+ auto XLenVT = Subtarget.getXLenVT();
+ SmallVector<SDValue> MergeMaskVals(NumElts);
+ for (unsigned Idx : seq<unsigned>(NumElts)) {
+ // If lane not used from either operand, use poison
+ if (ShuffleLaneUses[Idx] == 0)
+ MergeMaskVals[Idx] = DAG.getPOISON(XLenVT);
+ else
+ MergeMaskVals[Idx] =
+ DAG.getConstant(ShuffleLaneUses[Idx] == 1 ? 1 : 0, DL, XLenVT);
+ }
+ MVT MergeMaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+ SDValue MergeMask = DAG.getBuildVector(MergeMaskVT, DL, MergeMaskVals);
+ MVT MaskContainerVT =
+ getContainerForFixedLengthVector(DAG, MergeMaskVT, Subtarget);
+
+ SDValue Merge = DAG.getNode(
+ RISCVISD::VMERGE_VL, DL, ContainerVT,
+ convertToScalableVector(MaskContainerVT, MergeMask, DAG, Subtarget),
+ convertToScalableVector(ContainerVT, V1.getOperand(0), DAG, Subtarget),
+ convertToScalableVector(ContainerVT, V2.getOperand(0), DAG, Subtarget),
+ DAG.getUNDEF(ContainerVT), VL);
+
+ // Create the constant vector for the gather
+ SmallVector<SDValue> GatherVals(NumElts);
+ for (unsigned Idx : seq<unsigned>(NumElts)) {
+ // In bounds checks for Mask done already
+ int Lane = Mask[Idx];
+ if ((unsigned)Lane < NumElts)
+ GatherVals[Idx] = DAG.getConstant(V1Mask[Lane], DL, XLenVT);
+ else
+ GatherVals[Idx] = DAG.getConstant(V2Mask[Lane - NumElts], DL, XLenVT);
+ }
+ SDValue GatherMask = DAG.getBuildVector(VT, DL, GatherVals);
+
+ SDValue Gather = DAG.getNode(
+ RISCVISD::VRGATHER_VV_VL, DL, ContainerVT, Merge,
+ convertToScalableVector(ContainerVT, GatherMask, DAG, Subtarget),
+ DAG.getUNDEF(ContainerVT), DAG.getUNDEF(MaskContainerVT), VL);
+ return convertFromScalableVector(VT, Gather, DAG, Subtarget);
+}
+
/// Match v(f)slide1up/down idioms. These operations involve sliding
/// N-1 elements to make room for an inserted scalar at one end.
static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT,
@@ -6219,6 +6320,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
+ if (SDValue V = lowerVECTOR_SHUFFLEAsMergeGather(DL, VT, V1, V2, Mask,
+ Subtarget, DAG))
+ return V;
+
// A bitrotate will be one instruction on Zvkb, so try to lower to it first if
// available.
if (Subtarget.hasStdExtZvkb())
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 6d9ebae73cc0f..aafb69ca03ca7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -8,14 +8,13 @@ define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI0_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 819
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -29,35 +28,14 @@ define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_0)
-; CHECK-NEXT: lui a1, %hi(.LCPI1_1)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_1)
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI1_2)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_2)
-; CHECK-NEXT: vle16.v v14, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: vrgather.vv v16, v10, v12
-; CHECK-NEXT: vslidedown.vx v10, v14, a1
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT: vrgather.vv v11, v16, v10
-; CHECK-NEXT: vrgather.vv v10, v16, v14
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI1_3)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_3)
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, 6
-; CHECK-NEXT: addi a0, a0, 1830
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 819
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vrgather.vv v16, v8, v12
-; CHECK-NEXT: vslidedown.vx v8, v14, a1
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vrgather.vv v9, v16, v8
-; CHECK-NEXT: vrgather.vv v8, v16, v14
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -72,14 +50,13 @@ define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <1
; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI2_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 803
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -140,22 +117,13 @@ define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1)
; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 7
+; CHECK-NEXT: addi a0, a0, 1843
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: lui a0, %hi(.LCPI5_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_1)
-; CHECK-NEXT: vrgather.vv v14, v8, v12
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: vmv.v.v v8, v14
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT: vslideup.vi v8, v14, 3, v0.t
-; CHECK-NEXT: li a0, 112
-; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vrgather.vv v14, v10, v12
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT: vslideup.vi v8, v14, 4, v0.t
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
>From b9df73811b353ab15c0e6ba5e344023e00c8e378 Mon Sep 17 00:00:00 2001
From: Ryan Buchner <buchner.ryan at gmail.com>
Date: Wed, 28 Jan 2026 12:05:25 -0800
Subject: [PATCH 03/16] [RISCV] Fix bounds in LanePoisonOrOOB
Co-authored-by: Luke Lau <luke_lau at icloud.com>
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a1474e32232e4..ee1b295ebe9ec 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5469,7 +5469,7 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
for (unsigned Idx : seq<unsigned>(NumElts)) {
int Lane = Mask[Idx];
auto LanePoisonOrOOB = [](int Lane, unsigned NumElts) -> bool {
- return Lane < 0 || Lane > (int)NumElts;
+ return Lane < 0 || Lane >= (int)NumElts;
};
// Don't handle if the index is poison or out of bounds
if (LanePoisonOrOOB(Lane, 2 * NumElts))
>From 6091470934d7a864b35e280ecf2ec74d7ba789ab Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 11:30:15 -0800
Subject: [PATCH 04/16] [RISCV] Re-precommit tests with constant pools listed
---
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 390 ++++++++++++++++--
1 file changed, 364 insertions(+), 26 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index aafb69ca03ca7..9f15e65d0e28c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -3,18 +3,53 @@
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI0_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI0_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_disjoint:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v14, (a0)
-; CHECK-NEXT: lui a0, 3
-; CHECK-NEXT: addi a0, a0, 819
-; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
-; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI0_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -24,18 +59,107 @@ define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI1_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI1_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI1_2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI1_3
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 2 # 0x2
; CHECK-LABEL: shuffle_shuffle_disjoint_unordered:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_0)
+; CHECK-NEXT: lui a1, %hi(.LCPI1_1)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_1)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI1_2)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_2)
+; CHECK-NEXT: vle16.v v14, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a1, a1, 1
+; CHECK-NEXT: vrgather.vv v16, v10, v12
+; CHECK-NEXT: vslidedown.vx v10, v14, a1
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v11, v16, v10
+; CHECK-NEXT: vrgather.vv v10, v16, v14
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v14, (a0)
-; CHECK-NEXT: lui a0, 3
-; CHECK-NEXT: addi a0, a0, 819
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI1_3)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_3)
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, 6
+; CHECK-NEXT: addi a0, a0, 1830
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
-; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vrgather.vv v16, v8, v12
+; CHECK-NEXT: vslidedown.vx v8, v14, a1
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v16, v8
+; CHECK-NEXT: vrgather.vv v8, v16, v14
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -45,18 +169,53 @@ define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16
; Can be optimized since the lanes are disjoint, but a single lane is used multiple times by one of the vectors
define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI2_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI2_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_duplicated_within_operand:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v14, (a0)
-; CHECK-NEXT: lui a0, 3
-; CHECK-NEXT: addi a0, a0, 803
-; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
-; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI2_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -66,6 +225,40 @@ define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <1
; Can't be optimized as merge-shuffle since the same lane (8) is used from both operands
define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI3_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI3_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_duplicated_lane:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
@@ -88,6 +281,40 @@ define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %
; Can't be optimized since shuff0 is used twice
define <16 x i16> @shuffle_shuffle_multiple_uses(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI4_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI4_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_multiple_uses:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
@@ -112,18 +339,61 @@ define <16 x i16> @shuffle_shuffle_multiple_uses(<16 x i16> %op0, <16 x i16> %op
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI5_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI5_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_unbalanced:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v14, (a0)
-; CHECK-NEXT: lui a0, 7
-; CHECK-NEXT: addi a0, a0, 1843
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
-; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: lui a0, %hi(.LCPI5_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_1)
+; CHECK-NEXT: vrgather.vv v14, v8, v12
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vmv.v.v v8, v14
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vslideup.vi v8, v14, 3, v0.t
+; CHECK-NEXT: li a0, 112
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vrgather.vv v14, v10, v12
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vslideup.vi v8, v14, 4, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -133,6 +403,40 @@ define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1)
; Can't be optimized since one of the elements used by the final shuffle is shuffled to a poison index
define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI6_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI6_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_poison:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
@@ -155,6 +459,40 @@ define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
; Can't be optimized since one of the elements used by the final shuffle is shuffled to a poison index
define <16 x i16> @shuffle_shuffle_poison2(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI7_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI7_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_poison2:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
>From 7c7130745717e9fb1245f2eff0ba69a543f8b1a9 Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 11:59:59 -0800
Subject: [PATCH 05/16] [RISCV] Update tests including constant pools
---
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 314 +++++-------------
1 file changed, 90 insertions(+), 224 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 9f15e65d0e28c..f2ab6a5441010 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -4,52 +4,34 @@
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI0_0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 4 # 0x4
-; CHECK-NEXT: .half 8 # 0x8
-; CHECK-NEXT: .half 12 # 0xc
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 9 # 0x9
-; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI0_1
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .half 11 # 0xb
-; CHECK-NEXT: .half 15 # 0xf
-; CHECK-NEXT: .half 2 # 0x2
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .half 10 # 0xa
-; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 4 # 0x4
+; CHECK-NEXT: .byte 8 # 0x8
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 14 # 0xe
; CHECK-LABEL: shuffle_shuffle_disjoint:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI0_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 819
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -60,106 +42,34 @@ define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI1_0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 4 # 0x4
-; CHECK-NEXT: .half 8 # 0x8
-; CHECK-NEXT: .half 12 # 0xc
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 9 # 0x9
-; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI1_1
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .half 11 # 0xb
-; CHECK-NEXT: .half 15 # 0xf
-; CHECK-NEXT: .half 2 # 0x2
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .half 10 # 0xa
-; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI1_2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 4 # 0x4
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 2 # 0x2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI1_3
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 4 # 0x4
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 14 # 0xe
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 4 # 0x4
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 8 # 0x8
; CHECK-LABEL: shuffle_shuffle_disjoint_unordered:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_0)
-; CHECK-NEXT: lui a1, %hi(.LCPI1_1)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_1)
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI1_2)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_2)
-; CHECK-NEXT: vle16.v v14, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: vrgather.vv v16, v10, v12
-; CHECK-NEXT: vslidedown.vx v10, v14, a1
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT: vrgather.vv v11, v16, v10
-; CHECK-NEXT: vrgather.vv v10, v16, v14
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI1_3)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI1_3)
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, 6
-; CHECK-NEXT: addi a0, a0, 1830
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 819
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vrgather.vv v16, v8, v12
-; CHECK-NEXT: vslidedown.vx v8, v14, a1
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vrgather.vv v9, v16, v8
-; CHECK-NEXT: vrgather.vv v8, v16, v14
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -170,52 +80,34 @@ define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16
; Can be optimized since the lanes are disjoint, but a single lane is used multiple times by one of the vectors
define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI2_0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 8 # 0x8
-; CHECK-NEXT: .half 12 # 0xc
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 9 # 0x9
-; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI2_1
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .half 11 # 0xb
-; CHECK-NEXT: .half 15 # 0xf
-; CHECK-NEXT: .half 2 # 0x2
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .half 10 # 0xa
-; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 8 # 0x8
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 14 # 0xe
; CHECK-LABEL: shuffle_shuffle_duplicated_within_operand:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI2_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 803
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -340,60 +232,34 @@ define <16 x i16> @shuffle_shuffle_multiple_uses(<16 x i16> %op0, <16 x i16> %op
; Can be optimized as a merge followed by a shuffle
define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI5_0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 4 # 0x4
-; CHECK-NEXT: .half 8 # 0x8
-; CHECK-NEXT: .half 12 # 0xc
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 9 # 0x9
-; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .half 10 # 0xa
-; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI5_1
-; CHECK-NEXT: .half 3 # 0x3
-; CHECK-NEXT: .half 7 # 0x7
-; CHECK-NEXT: .half 11 # 0xb
-; CHECK-NEXT: .half 15 # 0xf
-; CHECK-NEXT: .half 2 # 0x2
-; CHECK-NEXT: .half 6 # 0x6
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 4 # 0x4
+; CHECK-NEXT: .byte 8 # 0x8
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 14 # 0xe
; CHECK-LABEL: shuffle_shuffle_unbalanced:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 7
+; CHECK-NEXT: addi a0, a0, 1843
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: lui a0, %hi(.LCPI5_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_1)
-; CHECK-NEXT: vrgather.vv v14, v8, v12
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: vmv.v.v v8, v14
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT: vslideup.vi v8, v14, 3, v0.t
-; CHECK-NEXT: li a0, 112
-; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vrgather.vv v14, v10, v12
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT: vslideup.vi v8, v14, 4, v0.t
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
>From d8fd0c327a1366fd01db18c442cf6921d15cb06f Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 13:56:09 -0800
Subject: [PATCH 06/16] [RISCV] Use TrueMask for Gather operation
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++--
.../test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ee1b295ebe9ec..eff5a85f7bbb6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5496,7 +5496,7 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
}
MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
- auto [_, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+ auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
// Create the mask for the initial merge
auto XLenVT = Subtarget.getXLenVT();
@@ -5536,7 +5536,7 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
SDValue Gather = DAG.getNode(
RISCVISD::VRGATHER_VV_VL, DL, ContainerVT, Merge,
convertToScalableVector(ContainerVT, GatherMask, DAG, Subtarget),
- DAG.getUNDEF(ContainerVT), DAG.getUNDEF(MaskContainerVT), VL);
+ DAG.getUNDEF(ContainerVT), TrueMask, VL);
return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index f2ab6a5441010..318732243cc7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -31,7 +31,7 @@ define <16 x i16> @shuffle_shuffle_disjoint(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -69,7 +69,7 @@ define <16 x i16> @shuffle_shuffle_disjoint_unordered(<16 x i16> %op0, <16 x i16
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -107,7 +107,7 @@ define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <1
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -259,7 +259,7 @@ define <16 x i16> @shuffle_shuffle_unbalanced(<16 x i16> %op0, <16 x i16> %op1)
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
; CHECK-NEXT: vsext.vf2 v12, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
>From 065f86e3df7d37e89ae6f6375cb3b8ae7116018c Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 13:42:02 -0800
Subject: [PATCH 07/16] [RISCV] Pre-commit tests for overlapping lanes
---
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 118 ++++++++++++++++++
1 file changed, 118 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 318732243cc7e..052ee3d023510 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -378,3 +378,121 @@ define <16 x i16> @shuffle_shuffle_poison2(<16 x i16> %op0, <16 x i16> %op1) {
%merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 poison, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
ret <16 x i16> %merge
}
+
+; Can't be optimized since the first shuffle uses data from two different operands
+define <16 x i16> @shuffle_shuffle_first_multi_shuffle(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI8_0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: LCPI8_1
+; CHECK-NEXT: .half 3 # 0x3
+; CHECK-NEXT: .half 7 # 0x7
+; CHECK-NEXT: .half 11 # 0xb
+; CHECK-NEXT: .half 15 # 0xf
+; CHECK-NEXT: .half 2 # 0x2
+; CHECK-NEXT: .half 6 # 0x6
+; CHECK-NEXT: .half 10 # 0xa
+; CHECK-NEXT: .half 14 # 0xe
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-NEXT: .zero 2
+; CHECK-LABEL: shuffle_shuffle_first_multi_shuffle:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI8_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_1)
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vrgather.vi v12, v10, 0, v0.t
+; CHECK-NEXT: vrgather.vv v8, v10, v16
+; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> %op1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 16, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
+
+; Can be optimized: the first two shuffles use the same lanes, but only a
+; disjoint set of lanes is used by the final shuffle
+define <16 x i16> @shuffle_shuffle_duplicates_not_used(<16 x i16> %op0, <16 x i16> %op1) {
+; CHECK-LABEL: LCPI9_0
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 4 # 0x4
+; CHECK-NEXT: .byte 8 # 0x8
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 14 # 0xe
+; CHECK-LABEL: LCPI9_1
+; CHECK-NEXT: .byte 3 # 0x3
+; CHECK-NEXT: .byte 7 # 0x7
+; CHECK-NEXT: .byte 11 # 0xb
+; CHECK-NEXT: .byte 15 # 0xf
+; CHECK-NEXT: .byte 2 # 0x2
+; CHECK-NEXT: .byte 6 # 0x6
+; CHECK-NEXT: .byte 10 # 0xa
+; CHECK-NEXT: .byte 14 # 0xe
+; CHECK-NEXT: .byte 0 # 0x0
+; CHECK-NEXT: .byte 4 # 0x4
+; CHECK-NEXT: .byte 8 # 0x8
+; CHECK-NEXT: .byte 12 # 0xc
+; CHECK-NEXT: .byte 1 # 0x1
+; CHECK-NEXT: .byte 5 # 0x5
+; CHECK-NEXT: .byte 9 # 0x9
+; CHECK-NEXT: .byte 13 # 0xd
+; CHECK-LABEL: shuffle_shuffle_duplicates_not_used:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1)
+; CHECK-NEXT: vle8.v v16, (a0)
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vrgather.vv v12, v8, v14
+; CHECK-NEXT: vsext.vf2 v8, v16
+; CHECK-NEXT: vrgather.vv v14, v10, v8
+; CHECK-NEXT: vslideup.vi v12, v14, 8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14>
+ %shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13>
+ %merge = shufflevector <16 x i16> %shuff0, <16 x i16> %shuff1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i16> %merge
+}
>From e60752d2dfd940cdb902526f84fba2897ff4ae7b Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 16:33:45 -0800
Subject: [PATCH 08/16] [RISCV] Update overlapping lanes tests
---
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 34 ++++---------------
1 file changed, 7 insertions(+), 27 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 052ee3d023510..1796351724442 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -458,38 +458,18 @@ define <16 x i16> @shuffle_shuffle_duplicates_not_used(<16 x i16> %op0, <16 x i1
; CHECK-NEXT: .byte 6 # 0x6
; CHECK-NEXT: .byte 10 # 0xa
; CHECK-NEXT: .byte 14 # 0xe
-; CHECK-LABEL: LCPI9_1
-; CHECK-NEXT: .byte 3 # 0x3
-; CHECK-NEXT: .byte 7 # 0x7
-; CHECK-NEXT: .byte 11 # 0xb
-; CHECK-NEXT: .byte 15 # 0xf
-; CHECK-NEXT: .byte 2 # 0x2
-; CHECK-NEXT: .byte 6 # 0x6
-; CHECK-NEXT: .byte 10 # 0xa
-; CHECK-NEXT: .byte 14 # 0xe
-; CHECK-NEXT: .byte 0 # 0x0
-; CHECK-NEXT: .byte 4 # 0x4
-; CHECK-NEXT: .byte 8 # 0x8
-; CHECK-NEXT: .byte 12 # 0xc
-; CHECK-NEXT: .byte 1 # 0x1
-; CHECK-NEXT: .byte 5 # 0x5
-; CHECK-NEXT: .byte 9 # 0x9
-; CHECK-NEXT: .byte 13 # 0xd
; CHECK-LABEL: shuffle_shuffle_duplicates_not_used:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v12, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1)
-; CHECK-NEXT: vle8.v v16, (a0)
-; CHECK-NEXT: vsext.vf2 v14, v12
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vsext.vf2 v8, v16
-; CHECK-NEXT: vrgather.vv v14, v10, v8
-; CHECK-NEXT: vslideup.vi v12, v14, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle8.v v14, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 819
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vsext.vf2 v12, v14
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13>
>From 67b4f6f5864c4c141fa5bad488b6e8350a860040 Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 28 Jan 2026 15:48:41 -0800
Subject: [PATCH 09/16] [RISCV] Still optimize poison lane cases
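
A minimal sketch of the kind of case this enables (hypothetical 4-element IR,
not taken from the test file): poison entries in the inner or outer shuffle
masks no longer abort the combine, they simply stay poison in the rebuilt
gather indices.

  %s0 = shufflevector <4 x i16> %a, <4 x i16> poison, <4 x i32> <i32 0, i32 poison, i32 2, i32 poison>
  %s1 = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
  %r  = shufflevector <4 x i16> %s0, <4 x i16> %s1, <4 x i32> <i32 0, i32 4, i32 2, i32 poison>
  ; lanes 0 and 2 of %a and lane 1 of %b are still disjoint, so the lowering
  ; applies; the remaining lanes become poison indices.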
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 30 +++++----
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 64 ++++---------------
2 files changed, 31 insertions(+), 63 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index eff5a85f7bbb6..2778af1066166 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5468,12 +5468,9 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
SmallVector<unsigned> ShuffleLaneUses(NumElts, 0);
for (unsigned Idx : seq<unsigned>(NumElts)) {
int Lane = Mask[Idx];
- auto LanePoisonOrOOB = [](int Lane, unsigned NumElts) -> bool {
- return Lane < 0 || Lane >= (int)NumElts;
- };
- // Don't handle if the index is poison or out of bounds
- if (LanePoisonOrOOB(Lane, 2 * NumElts))
- return SDValue();
+ // Don't assign if poison
+ if (Lane == -1)
+ continue;
unsigned OpNum;
int OrigLane;
if ((unsigned)Lane < NumElts) {
@@ -5483,9 +5480,10 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
OpNum = 2;
OrigLane = V2Mask[Lane - NumElts];
}
- // Don't handle if the index is poison or if shuffling from a second
- // operand
- if (LanePoisonOrOOB(OrigLane, NumElts))
+ if (OrigLane == -1)
+ continue;
+ // Don't handle if shuffling from a second operand
+ if ((unsigned)OrigLane >= NumElts)
return SDValue();
const unsigned CurrLaneSrc = ShuffleLaneUses[OrigLane];
@@ -5524,12 +5522,20 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
// Create the constant vector for the gather
SmallVector<SDValue> GatherVals(NumElts);
for (unsigned Idx : seq<unsigned>(NumElts)) {
- // In bounds checks for Mask done already
int Lane = Mask[Idx];
+ if (Lane == -1) {
+ GatherVals[Idx] = DAG.getPOISON(XLenVT);
+ continue;
+ }
+ int SecondLane;
if ((unsigned)Lane < NumElts)
- GatherVals[Idx] = DAG.getConstant(V1Mask[Lane], DL, XLenVT);
+ SecondLane = V1Mask[Lane];
+ else
+ SecondLane = V2Mask[Lane - NumElts];
+ if (SecondLane == -1)
+ GatherVals[Idx] = DAG.getPOISON(XLenVT);
else
- GatherVals[Idx] = DAG.getConstant(V2Mask[Lane - NumElts], DL, XLenVT);
+ GatherVals[Idx] = DAG.getConstant(SecondLane, DL, XLenVT);
}
SDValue GatherMask = DAG.getBuildVector(VT, DL, GatherVals);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 1796351724442..1027860d74108 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -278,15 +278,6 @@ define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-NEXT: .half 5 # 0x5
; CHECK-NEXT: .half 9 # 0x9
; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI6_1
; CHECK-NEXT: .half 3 # 0x3
; CHECK-NEXT: .half 7 # 0x7
; CHECK-NEXT: .half 11 # 0xb
@@ -295,27 +286,17 @@ define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-NEXT: .half 6 # 0x6
; CHECK-NEXT: .half 10 # 0xa
; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_poison:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI6_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 803
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 poison, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -327,22 +308,13 @@ define <16 x i16> @shuffle_shuffle_poison(<16 x i16> %op0, <16 x i16> %op1) {
define <16 x i16> @shuffle_shuffle_poison2(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI7_0
; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 4 # 0x4
+; CHECK-NEXT: .zero 2
; CHECK-NEXT: .half 8 # 0x8
; CHECK-NEXT: .half 12 # 0xc
; CHECK-NEXT: .half 1 # 0x1
; CHECK-NEXT: .half 5 # 0x5
; CHECK-NEXT: .half 9 # 0x9
; CHECK-NEXT: .half 13 # 0xd
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI7_1
; CHECK-NEXT: .half 3 # 0x3
; CHECK-NEXT: .half 7 # 0x7
; CHECK-NEXT: .half 11 # 0xb
@@ -351,27 +323,17 @@ define <16 x i16> @shuffle_shuffle_poison2(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-NEXT: .half 6 # 0x6
; CHECK-NEXT: .half 10 # 0xa
; CHECK-NEXT: .half 14 # 0xe
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
-; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_poison2:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_0)
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: lui a0, %hi(.LCPI7_1)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_1)
-; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: lui a0, 3
+; CHECK-NEXT: addi a0, a0, 803
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0
+; CHECK-NEXT: vrgather.vv v8, v10, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
%shuff1 = shufflevector <16 x i16> %op1, <16 x i16> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 2, i32 6, i32 10, i32 14, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
>From 4508fba27147f5ad9b868c32416382feb34e599d Mon Sep 17 00:00:00 2001
From: Ryan Buchner <buchner.ryan at gmail.com>
Date: Thu, 29 Jan 2026 09:35:45 -0800
Subject: [PATCH 10/16] [RISCV] auto -> MVT
Co-authored-by: Luke Lau <luke_lau at icloud.com>
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2778af1066166..daaa376be3354 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5497,7 +5497,7 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
// Create the mask for the initial merge
- auto XLenVT = Subtarget.getXLenVT();
+ MVT XLenVT = Subtarget.getXLenVT();
SmallVector<SDValue> MergeMaskVals(NumElts);
for (unsigned Idx : seq<unsigned>(NumElts)) {
// If lane not used from either operand, use poison
>From e707bc93a7dfdbfed1a8af0927d7b50c5041868f Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Thu, 29 Jan 2026 10:01:02 -0800
Subject: [PATCH 11/16] [RISCV][NFC] Reorder code in
lowerDisjointIndicesShuffle()
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index daaa376be3354..f5298e99828f8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6073,10 +6073,6 @@ static bool isCompressMask(ArrayRef<int> Mask) {
static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
- MVT VT = SVN->getSimpleValueType(0);
- MVT XLenVT = Subtarget.getXLenVT();
- SDLoc DL(SVN);
-
const ArrayRef<int> Mask = SVN->getMask();
// Work out which source each lane will come from.
@@ -6095,6 +6091,9 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
return SDValue();
}
+ MVT VT = SVN->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ SDLoc DL(SVN);
SmallVector<SDValue> SelectMaskVals;
for (int Lane : Srcs) {
if (Lane == -1)
>From a01807c354e8ce7b770bb497e73a2d52d26e1fc8 Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Thu, 29 Jan 2026 10:05:55 -0800
Subject: [PATCH 12/16] [RISCV][NFC] Split off logic for creating
Vselect-Shuffle sequence from lowerDisjointIndicesShuffle()
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 43 +++++++++++++--------
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f5298e99828f8..dc03ec24c3601 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5439,6 +5439,30 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
+// Handle the lowering of disjoint shuffles to Vselect-Shuffle sequences
+// where the shuffle is a single-op shuffle
+// Can be lowered to VMERGE followed by VRGATHER
+static SDValue lowerShuffleMaskToVselectShuffle(
+ const ShuffleVectorSDNode *SVN, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget, const SmallVector<int, 16> &Srcs,
+ const SmallVector<int> &NewMask, SDValue V1, SDValue V2) {
+ MVT VT = SVN->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ SDLoc DL(SVN);
+ SmallVector<SDValue> SelectMaskVals;
+ for (int Lane : Srcs) {
+ if (Lane == -1)
+ SelectMaskVals.push_back(DAG.getUNDEF(XLenVT));
+ else
+ SelectMaskVals.push_back(DAG.getConstant(Lane ? 0 : 1, DL, XLenVT));
+ }
+ MVT MaskVT = VT.changeVectorElementType(MVT::i1);
+ SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, SelectMaskVals);
+ SDValue Select = DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
+
+ return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
+}
+
// A shuffle of shuffles where:
// - the first level of shuffles both only used data from a single input
// - the final output doesn't use data from the same lane of both operands
@@ -6091,21 +6115,6 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
return SDValue();
}
- MVT VT = SVN->getSimpleValueType(0);
- MVT XLenVT = Subtarget.getXLenVT();
- SDLoc DL(SVN);
- SmallVector<SDValue> SelectMaskVals;
- for (int Lane : Srcs) {
- if (Lane == -1)
- SelectMaskVals.push_back(DAG.getUNDEF(XLenVT));
- else
- SelectMaskVals.push_back(DAG.getConstant(Lane ? 0 : 1, DL, XLenVT));
- }
- MVT MaskVT = VT.changeVectorElementType(MVT::i1);
- SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, SelectMaskVals);
- SDValue Select = DAG.getNode(ISD::VSELECT, DL, VT, SelectMask,
- SVN->getOperand(0), SVN->getOperand(1));
-
// Move all indices relative to the first source.
SmallVector<int> NewMask(Mask.size());
for (unsigned I = 0; I < Mask.size(); I++) {
@@ -6115,7 +6124,9 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
NewMask[I] = Mask[I] % Mask.size();
}
- return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
+ return lowerShuffleMaskToVselectShuffle(SVN, DAG, Subtarget, Srcs, NewMask,
+ SVN->getOperand(0),
+ SVN->getOperand(1));
}
/// Is this mask local (i.e. elements only move within their local span), and
>From c4357d7f84fff6958f6651d1ff1c26789707cdeb Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Thu, 29 Jan 2026 10:41:40 -0800
Subject: [PATCH 13/16] [RISCV] Refactor lowerVECTOR_SHUFFLEAsMergeGather() to
use newly created lowerShuffleMaskToVselectShuffle()
The lowering no longer creates the MERGE and GATHER nodes directly; it builds a
vselect and a shuffle instead. The final codegen stays the same.
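
For illustration only (hypothetical 4-element IR, not taken from the test
file), a disjoint shuffle of shuffles now materialises as a vselect that merges
the two original sources plus a single-source shuffle, which later lowers to
vmerge.vvm followed by vrgather.vv:

  %s0  = shufflevector <4 x i16> %a, <4 x i16> poison, <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
  %s1  = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
  %r   = shufflevector <4 x i16> %s0, <4 x i16> %s1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ; becomes, lane for lane:
  %sel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i16> %a, <4 x i16> %b
  %r2  = shufflevector <4 x i16> %sel, <4 x i16> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>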
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 65 ++++++---------------
1 file changed, 18 insertions(+), 47 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dc03ec24c3601..f23152bf52d65 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5467,7 +5467,7 @@ static SDValue lowerShuffleMaskToVselectShuffle(
// - the first level of shuffles both only used data from a single input
// - the final output doesn't use data from the same lane of both operands
// This can be lowered to a MERGE followed by a GATHER
-static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
+static SDValue lowerVECTOR_SHUFFLEAsMergeGather(ShuffleVectorSDNode *SVN,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const RISCVSubtarget &Subtarget,
@@ -5483,25 +5483,25 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
if (V1.getOperand(0) == V2.getOperand(0))
return SDValue();
- unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumElts = Mask.size();
auto *SVN1 = cast<ShuffleVectorSDNode>(V1.getNode());
auto *SVN2 = cast<ShuffleVectorSDNode>(V2.getNode());
auto V1Mask = SVN1->getMask();
auto V2Mask = SVN2->getMask();
- // 0: Not set, 1: Set by V1, 2: Set by V2
- SmallVector<unsigned> ShuffleLaneUses(NumElts, 0);
+ // -1: Not set, 0: Set by V1, 1: Set by V2
+ SmallVector<int, 16> ShuffleLaneUses(NumElts, -1);
for (unsigned Idx : seq<unsigned>(NumElts)) {
int Lane = Mask[Idx];
// Don't assign if poison
if (Lane == -1)
continue;
- unsigned OpNum;
+ int OpNum;
int OrigLane;
if ((unsigned)Lane < NumElts) {
- OpNum = 1;
+ OpNum = 0;
OrigLane = V1Mask[Lane];
} else {
- OpNum = 2;
+ OpNum = 1;
OrigLane = V2Mask[Lane - NumElts];
}
if (OrigLane == -1)
@@ -5510,45 +5510,19 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
if ((unsigned)OrigLane >= NumElts)
return SDValue();
- const unsigned CurrLaneSrc = ShuffleLaneUses[OrigLane];
+ const int CurrLaneSrc = ShuffleLaneUses[OrigLane];
// Can't use the same lane from both operands in the merge
- if (CurrLaneSrc != 0 && CurrLaneSrc != OpNum)
+ if (CurrLaneSrc != -1 && CurrLaneSrc != OpNum)
return SDValue();
ShuffleLaneUses[OrigLane] = OpNum;
}
- MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
- auto [TrueMask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
-
- // Create the mask for the initial merge
- MVT XLenVT = Subtarget.getXLenVT();
- SmallVector<SDValue> MergeMaskVals(NumElts);
- for (unsigned Idx : seq<unsigned>(NumElts)) {
- // If lane not used from either operand, use poison
- if (ShuffleLaneUses[Idx] == 0)
- MergeMaskVals[Idx] = DAG.getPOISON(XLenVT);
- else
- MergeMaskVals[Idx] =
- DAG.getConstant(ShuffleLaneUses[Idx] == 1 ? 1 : 0, DL, XLenVT);
- }
- MVT MergeMaskVT = MVT::getVectorVT(MVT::i1, NumElts);
- SDValue MergeMask = DAG.getBuildVector(MergeMaskVT, DL, MergeMaskVals);
- MVT MaskContainerVT =
- getContainerForFixedLengthVector(DAG, MergeMaskVT, Subtarget);
-
- SDValue Merge = DAG.getNode(
- RISCVISD::VMERGE_VL, DL, ContainerVT,
- convertToScalableVector(MaskContainerVT, MergeMask, DAG, Subtarget),
- convertToScalableVector(ContainerVT, V1.getOperand(0), DAG, Subtarget),
- convertToScalableVector(ContainerVT, V2.getOperand(0), DAG, Subtarget),
- DAG.getUNDEF(ContainerVT), VL);
-
// Create the constant vector for the gather
- SmallVector<SDValue> GatherVals(NumElts);
+ SmallVector<int> GatherVals(NumElts);
for (unsigned Idx : seq<unsigned>(NumElts)) {
int Lane = Mask[Idx];
if (Lane == -1) {
- GatherVals[Idx] = DAG.getPOISON(XLenVT);
+ GatherVals[Idx] = -1;
continue;
}
int SecondLane;
@@ -5557,17 +5531,14 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(const SDLoc &DL, MVT VT,
else
SecondLane = V2Mask[Lane - NumElts];
if (SecondLane == -1)
- GatherVals[Idx] = DAG.getPOISON(XLenVT);
+ GatherVals[Idx] = -1;
else
- GatherVals[Idx] = DAG.getConstant(SecondLane, DL, XLenVT);
+ GatherVals[Idx] = SecondLane;
}
- SDValue GatherMask = DAG.getBuildVector(VT, DL, GatherVals);
- SDValue Gather = DAG.getNode(
- RISCVISD::VRGATHER_VV_VL, DL, ContainerVT, Merge,
- convertToScalableVector(ContainerVT, GatherMask, DAG, Subtarget),
- DAG.getUNDEF(ContainerVT), TrueMask, VL);
- return convertFromScalableVector(VT, Gather, DAG, Subtarget);
+ return lowerShuffleMaskToVselectShuffle(SVN, DAG, Subtarget, ShuffleLaneUses,
+ GatherVals, V1->getOperand(0),
+ V2.getOperand(0));
}
/// Match v(f)slide1up/down idioms. These operations involve sliding
@@ -6336,8 +6307,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
- if (SDValue V = lowerVECTOR_SHUFFLEAsMergeGather(DL, VT, V1, V2, Mask,
- Subtarget, DAG))
+ if (SDValue V =
+ lowerVECTOR_SHUFFLEAsMergeGather(SVN, V1, V2, Mask, Subtarget, DAG))
return V;
// A bitrotate will be one instruction on Zvkb, so try to lower to it first if
>From 0958506c16402c560726f50e03f51f4c920448be Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Wed, 4 Feb 2026 23:03:37 -0800
Subject: [PATCH 14/16] [RISCV] Rather than handle disjoint case for shuffle of
shuffles, just compress into single shuffle
The existing logic for disjoint shuffles can then lower the resulting shuffle as needed.
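
As a rough sketch (hypothetical 4-element IR, not from the test file),
composing the inner masks into the outer one collapses the three shuffles into
a single two-source shuffle, which the disjoint-indices lowering can pick up:

  %s0 = shufflevector <4 x i16> %a, <4 x i16> poison, <4 x i32> <i32 3, i32 1, i32 poison, i32 poison>
  %s1 = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
  %r  = shufflevector <4 x i16> %s0, <4 x i16> %s1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ; compresses to:
  %r2 = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 1, i32 6>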
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 69 ++++++-------------
.../RISCV/rvv/fixed-vectors-shuffle-int.ll | 9 +--
.../RISCV/rvv/fixed-vectors-shuffle-merge.ll | 31 +++++----
3 files changed, 39 insertions(+), 70 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f23152bf52d65..34299b6b8b097 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5463,15 +5463,12 @@ static SDValue lowerShuffleMaskToVselectShuffle(
return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
}
-// A shuffle of shuffles where:
-// - the first level of shuffles both only used data from a single input
-// - the final output doesn't use data from the same lane of both operands
-// This can be lowered to a MERGE followed by a GATHER
-static SDValue lowerVECTOR_SHUFFLEAsMergeGather(ShuffleVectorSDNode *SVN,
- SDValue V1, SDValue V2,
- ArrayRef<int> Mask,
- const RISCVSubtarget &Subtarget,
- SelectionDAG &DAG) {
+// A shuffle of shuffles where the final data is drawn from only 2 input ops
+// can be compressed into a single shuffle
+static SDValue compressShuffleOfShuffles(ShuffleVectorSDNode *SVN, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
if (V1.getOpcode() != ISD::VECTOR_SHUFFLE ||
V2.getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
@@ -5479,66 +5476,40 @@ static SDValue lowerVECTOR_SHUFFLEAsMergeGather(ShuffleVectorSDNode *SVN,
if (!V1.hasOneUse() || !V2.hasOneUse())
return SDValue();
- // Can just be reduced into a single gather operation
- if (V1.getOperand(0) == V2.getOperand(0))
- return SDValue();
-
unsigned NumElts = Mask.size();
auto *SVN1 = cast<ShuffleVectorSDNode>(V1.getNode());
auto *SVN2 = cast<ShuffleVectorSDNode>(V2.getNode());
auto V1Mask = SVN1->getMask();
auto V2Mask = SVN2->getMask();
- // -1: Not set, 0: Set by V1, 1: Set by V2
- SmallVector<int, 16> ShuffleLaneUses(NumElts, -1);
+ SmallVector<int> NewMask(NumElts, -1);
for (unsigned Idx : seq<unsigned>(NumElts)) {
int Lane = Mask[Idx];
// Don't assign if poison
if (Lane == -1)
continue;
- int OpNum;
int OrigLane;
+ bool SecondOp = false;
if ((unsigned)Lane < NumElts) {
- OpNum = 0;
OrigLane = V1Mask[Lane];
} else {
- OpNum = 1;
OrigLane = V2Mask[Lane - NumElts];
+ SecondOp = true;
}
if (OrigLane == -1)
continue;
// Don't handle if shuffling from a second operand
if ((unsigned)OrigLane >= NumElts)
return SDValue();
-
- const int CurrLaneSrc = ShuffleLaneUses[OrigLane];
- // Can't use the same lane from both operands in the merge
- if (CurrLaneSrc != -1 && CurrLaneSrc != OpNum)
- return SDValue();
- ShuffleLaneUses[OrigLane] = OpNum;
+ if (SecondOp)
+ OrigLane += NumElts;
+ NewMask[Idx] = OrigLane;
}
- // Create the constant vector for the gather
- SmallVector<int> GatherVals(NumElts);
- for (unsigned Idx : seq<unsigned>(NumElts)) {
- int Lane = Mask[Idx];
- if (Lane == -1) {
- GatherVals[Idx] = -1;
- continue;
- }
- int SecondLane;
- if ((unsigned)Lane < NumElts)
- SecondLane = V1Mask[Lane];
- else
- SecondLane = V2Mask[Lane - NumElts];
- if (SecondLane == -1)
- GatherVals[Idx] = -1;
- else
- GatherVals[Idx] = SecondLane;
- }
+ MVT VT = SVN->getSimpleValueType(0);
+ SDLoc DL(SVN);
- return lowerShuffleMaskToVselectShuffle(SVN, DAG, Subtarget, ShuffleLaneUses,
- GatherVals, V1->getOperand(0),
- V2.getOperand(0));
+ return DAG.getVectorShuffle(VT, DL, V1->getOperand(0), V2->getOperand(0),
+ NewMask);
}
/// Match v(f)slide1up/down idioms. These operations involve sliding
@@ -6182,6 +6153,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
unsigned NumElts = VT.getVectorNumElements();
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+ if (SDValue V = compressShuffleOfShuffles(SVN, V1, V2, SVN->getMask(),
+ Subtarget, DAG))
+ return V;
+
if (VT.getVectorElementType() == MVT::i1) {
// Lower to a vror.vi of a larger element type if possible before we promote
// i1s to i8s.
@@ -6307,10 +6282,6 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
- if (SDValue V =
- lowerVECTOR_SHUFFLEAsMergeGather(SVN, V1, V2, Mask, Subtarget, DAG))
- return V;
-
// A bitrotate will be one instruction on Zvkb, so try to lower to it first if
// available.
if (Subtarget.hasStdExtZvkb())
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll
index 5683476852683..e398a858684b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll
@@ -295,12 +295,9 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
; CHECK-LABEL: interleave_shuffles:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vrgather.vi v9, v8, 0
-; CHECK-NEXT: vrgather.vi v10, v8, 1
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vwaddu.vv v8, v9, v10
-; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vwmaccu.vx v8, a0, v10
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vslideup.vi v9, v8, 2
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%y = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
%z = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
index 1027860d74108..e571dd6fde84c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-merge.ll
@@ -118,14 +118,6 @@ define <16 x i16> @shuffle_shuffle_duplicated_within_operand(<16 x i16> %op0, <1
; Can't be optimized as merge-shuffle since the same lane (8) is used from both operands
define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %op1) {
; CHECK-LABEL: LCPI3_0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 0 # 0x0
-; CHECK-NEXT: .half 8 # 0x8
-; CHECK-NEXT: .half 12 # 0xc
-; CHECK-NEXT: .half 1 # 0x1
-; CHECK-NEXT: .half 5 # 0x5
-; CHECK-NEXT: .half 9 # 0x9
-; CHECK-NEXT: .half 13 # 0xd
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
@@ -134,7 +126,6 @@ define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
-; CHECK-LABEL: LCPI3_1
; CHECK-NEXT: .half 3 # 0x3
; CHECK-NEXT: .half 7 # 0x7
; CHECK-NEXT: .half 8 # 0x8
@@ -143,6 +134,15 @@ define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %
; CHECK-NEXT: .half 6 # 0x6
; CHECK-NEXT: .half 10 # 0xa
; CHECK-NEXT: .half 14 # 0xe
+; CHECK-LABEL: LCPI3_1
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 0 # 0x0
+; CHECK-NEXT: .half 8 # 0x8
+; CHECK-NEXT: .half 12 # 0xc
+; CHECK-NEXT: .half 1 # 0x1
+; CHECK-NEXT: .half 5 # 0x5
+; CHECK-NEXT: .half 9 # 0x9
+; CHECK-NEXT: .half 13 # 0xd
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
; CHECK-NEXT: .zero 2
@@ -153,16 +153,17 @@ define <16 x i16> @shuffle_shuffle_duplicated_lane(<16 x i16> %op0, <16 x i16> %
; CHECK-NEXT: .zero 2
; CHECK-LABEL: shuffle_shuffle_duplicated_lane:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_0)
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v14, (a0)
; CHECK-NEXT: lui a0, %hi(.LCPI3_1)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_1)
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vle16.v v14, (a0)
+; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_0)
; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: li a0, -256
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vrgather.vv v12, v8, v14
-; CHECK-NEXT: vrgather.vv v8, v10, v16
-; CHECK-NEXT: vslideup.vi v12, v8, 8
+; CHECK-NEXT: vrgather.vv v12, v10, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%shuff0 = shufflevector <16 x i16> %op0, <16 x i16> poison, <16 x i32> <i32 0, i32 0, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
>From 1aae3f950c9515b31e0e769550dad805c02bf42b Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Thu, 5 Feb 2026 00:26:22 -0800
Subject: [PATCH 15/16] Revert "[RISCV][NFC] Split off logic for creating
Vselect-Shuffle sequence from lowerDisjointIndicesShuffle()"
This reverts commit a01807c354e8ce7b770bb497e73a2d52d26e1fc8.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 43 ++++++++-------------
1 file changed, 16 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 34299b6b8b097..a8691371465cb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5439,30 +5439,6 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
-// Handle the lowering of disjoint shuffles to Vselect-Shuffle sequences
-// where the shuffle is a single-op shuffle
-// Can be lowered to VMERGE followed by VRGATHER
-static SDValue lowerShuffleMaskToVselectShuffle(
- const ShuffleVectorSDNode *SVN, SelectionDAG &DAG,
- const RISCVSubtarget &Subtarget, const SmallVector<int, 16> &Srcs,
- const SmallVector<int> &NewMask, SDValue V1, SDValue V2) {
- MVT VT = SVN->getSimpleValueType(0);
- MVT XLenVT = Subtarget.getXLenVT();
- SDLoc DL(SVN);
- SmallVector<SDValue> SelectMaskVals;
- for (int Lane : Srcs) {
- if (Lane == -1)
- SelectMaskVals.push_back(DAG.getUNDEF(XLenVT));
- else
- SelectMaskVals.push_back(DAG.getConstant(Lane ? 0 : 1, DL, XLenVT));
- }
- MVT MaskVT = VT.changeVectorElementType(MVT::i1);
- SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, SelectMaskVals);
- SDValue Select = DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
-
- return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
-}
-
// A shuffle of shuffles where the final data is drawn from only 2 input ops
// can be compressed into a single shuffle
static SDValue compressShuffleOfShuffles(ShuffleVectorSDNode *SVN, SDValue V1,
@@ -6057,6 +6033,21 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
return SDValue();
}
+ MVT VT = SVN->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ SDLoc DL(SVN);
+ SmallVector<SDValue> SelectMaskVals;
+ for (int Lane : Srcs) {
+ if (Lane == -1)
+ SelectMaskVals.push_back(DAG.getUNDEF(XLenVT));
+ else
+ SelectMaskVals.push_back(DAG.getConstant(Lane ? 0 : 1, DL, XLenVT));
+ }
+ MVT MaskVT = VT.changeVectorElementType(MVT::i1);
+ SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, SelectMaskVals);
+ SDValue Select = DAG.getNode(ISD::VSELECT, DL, VT, SelectMask,
+ SVN->getOperand(0), SVN->getOperand(1));
+
// Move all indices relative to the first source.
SmallVector<int> NewMask(Mask.size());
for (unsigned I = 0; I < Mask.size(); I++) {
@@ -6066,9 +6057,7 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
NewMask[I] = Mask[I] % Mask.size();
}
- return lowerShuffleMaskToVselectShuffle(SVN, DAG, Subtarget, Srcs, NewMask,
- SVN->getOperand(0),
- SVN->getOperand(1));
+ return DAG.getVectorShuffle(VT, DL, Select, DAG.getUNDEF(VT), NewMask);
}
/// Is this mask local (i.e. elements only move within their local span), and
>From f8ae6d4b705d903656f876723a4fad103ba9c2d7 Mon Sep 17 00:00:00 2001
From: bababuck <rbuchner at qti.qualcomm.com>
Date: Thu, 5 Feb 2026 00:33:01 -0800
Subject: [PATCH 16/16] Revert "[RISCV][NFC] Reorder code in
lowerDisjointIndicesShuffle()"
This reverts commit e707bc93a7dfdbfed1a8af0927d7b50c5041868f.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a8691371465cb..23f0d9935e5b2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6015,6 +6015,10 @@ static bool isCompressMask(ArrayRef<int> Mask) {
static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
+ MVT VT = SVN->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ SDLoc DL(SVN);
+
const ArrayRef<int> Mask = SVN->getMask();
// Work out which source each lane will come from.
@@ -6033,9 +6037,6 @@ static SDValue lowerDisjointIndicesShuffle(ShuffleVectorSDNode *SVN,
return SDValue();
}
- MVT VT = SVN->getSimpleValueType(0);
- MVT XLenVT = Subtarget.getXLenVT();
- SDLoc DL(SVN);
SmallVector<SDValue> SelectMaskVals;
for (int Lane : Srcs) {
if (Lane == -1)