[llvm] [VectorCombine] Consolidate fragmented loads from shuffle chains into wide loads (PR #177571)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 23 03:40:45 PST 2026


https://github.com/ParkHanbum updated https://github.com/llvm/llvm-project/pull/177571

From 22325d9c6f22e3eb0ada0125272439be1bff5bdf Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Sat, 10 Jan 2026 19:29:33 +0900
Subject: [PATCH 1/2] add testcases for upcoming patch

---
 .../X86/shuffle-of-fragment-loads.ll          | 494 ++++++++++++++++++
 1 file changed, 494 insertions(+)
 create mode 100644 llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll

diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll
new file mode 100644
index 0000000000000..858ddeb2e2943
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll
@@ -0,0 +1,494 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
+
+declare void @use(<2 x i32>)
+
+; ============================================================================
+; Basic functionality (Type Match, Widen)
+; ============================================================================
+
+define <8 x float> @test_basic_group_float(ptr %p) {
+; CHECK-LABEL: define <8 x float> @test_basic_group_float(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <4 x float>, ptr [[P]], align 16
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 32
+; CHECK-NEXT:    [[L2:%.*]] = load <4 x float>, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[L1]], <4 x float> [[L2]], <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x float> [[RES]]
+;
+  %l1 = load <4 x float>, ptr %p, align 16
+  %gep = getelementptr inbounds i8, ptr %p, i64 32
+  %l2 = load <4 x float>, ptr %gep, align 16
+  %shuf1 = shufflevector <4 x float> %l1, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+  %shuf2 = shufflevector <4 x float> %l2, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+  %res = shufflevector <8 x float> %shuf1, <8 x float> %shuf2, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 2, i32 3, i32 10, i32 11>
+  ret <8 x float> %res
+}
+
+define <8 x i32> @test_reordered_load(ptr %p) {
+; CHECK-LABEL: define <8 x i32> @test_reordered_load(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <4 x i32>, ptr [[P]], align 16
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L2:%.*]] = load <4 x i32>, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[L2]], <4 x i32> [[L1]], <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x i32> [[RES]]
+;
+  %l1 = load <4 x i32>, ptr %p, align 16
+  %gep = getelementptr inbounds i8, ptr %p, i64 16
+  %l2 = load <4 x i32>, ptr %gep, align 16
+  %res = shufflevector <4 x i32> %l2, <4 x i32> %l1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 2, i32 3, i32 6, i32 7>
+  ret <8 x i32> %res
+}
+
+define <4 x float> @test_poison_pad(ptr %p) {
+; CHECK-LABEL: define <4 x float> @test_poison_pad(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x float>, ptr [[P]], align 8
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L2:%.*]] = load <2 x float>, ptr [[GEP]], align 8
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[L1]], <2 x float> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x float> [[RES]]
+;
+  %l1 = load <2 x float>, ptr %p, align 8
+  %gep = getelementptr inbounds i8, ptr %p, i64 16
+  %l2 = load <2 x float>, ptr %gep, align 8
+  %l1_pad = shufflevector <2 x float> %l1, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+  %l2_pad = shufflevector <2 x float> %l2, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+  %res = shufflevector <4 x float> %l1_pad, <4 x float> %l2_pad, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_single_load(ptr %p) {
+; CHECK-LABEL: define <4 x float> @test_single_load(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <4 x float>, ptr [[P]], align 16
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[L1]], <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT:    ret <4 x float> [[RES]]
+;
+  %l1 = load <4 x float>, ptr %p
+  %tmp = shufflevector <4 x float> %l1, <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x float> %tmp, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  ret <4 x float> %res
+}
+
+; ====================================================================================
+; Positive : Consolidate multiple fragmented memory loads and subsequent shuffles into
+; one or two wide loads and a single shuffle operation.
+; ====================================================================================
+
+define <4 x double> @test_double_4_to_2(ptr %x, ptr %y) {
+; CHECK-LABEL: define <4 x double> @test_double_4_to_2(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x double>, ptr [[X]], align 16
+; CHECK-NEXT:    [[X_OFF:%.*]] = getelementptr i8, ptr [[X]], i64 16
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x double>, ptr [[X_OFF]], align 16
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x double>, ptr [[Y]], align 16
+; CHECK-NEXT:    [[Y_OFF:%.*]] = getelementptr i8, ptr [[Y]], i64 16
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x double>, ptr [[Y_OFF]], align 16
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x double> [[X0]], <2 x double> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[V2:%.*]] = shufflevector <2 x double> [[Y0]], <2 x double> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x double> [[V1]], <4 x double> [[V2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT:    ret <4 x double> [[RES]]
+;
+  %x0 = load <2 x double>, ptr %x
+  %x_off = getelementptr i8, ptr %x, i64 16
+  %x1 = load <2 x double>, ptr %x_off
+  %y0 = load <2 x double>, ptr %y
+  %y_off = getelementptr i8, ptr %y, i64 16
+  %y1 = load <2 x double>, ptr %y_off
+  %v1 = shufflevector <2 x double> %x0, <2 x double> %x1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v2 = shufflevector <2 x double> %y0, <2 x double> %y1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x double> %v1, <4 x double> %v2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_double_3_to_1(ptr %p) {
+; CHECK-LABEL: define <4 x double> @test_double_3_to_1(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x double>, ptr [[P]], align 16
+; CHECK-NEXT:    [[P_PLUS_8:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT:    [[L2:%.*]] = load <2 x double>, ptr [[P_PLUS_8]], align 8
+; CHECK-NEXT:    [[P_PLUS_16:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L3:%.*]] = load <2 x double>, ptr [[P_PLUS_16]], align 16
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x double> [[L1]], <2 x double> [[L2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x double> [[V1]], <2 x double> [[L3]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x double> [[RES]]
+;
+  %L1 = load <2 x double>, ptr %p, align 16
+  %p_plus_8 = getelementptr i8, ptr %p, i64 8
+  %L2 = load <2 x double>, ptr %p_plus_8, align 8
+  %p_plus_16 = getelementptr i8, ptr %p, i64 16
+  %L3 = load <2 x double>, ptr %p_plus_16, align 16
+  %v1 = shufflevector <2 x double> %L1, <2 x double> %L2, <2 x i32> <i32 0, i32 3>
+  %res = shufflevector <2 x double> %v1, <2 x double> %L3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x double> %res
+}
+
+; Float
+define <8 x float> @test_float_4_to_2(ptr %x, ptr %y) {
+; CHECK-LABEL: define <8 x float> @test_float_4_to_2(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x float>, ptr [[X]], align 8
+; CHECK-NEXT:    [[X_OFF:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x float>, ptr [[X_OFF]], align 8
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x float>, ptr [[Y]], align 8
+; CHECK-NEXT:    [[Y_OFF:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x float>, ptr [[Y_OFF]], align 8
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x float> [[X0]], <2 x float> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[V2:%.*]] = shufflevector <2 x float> [[Y0]], <2 x float> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[V1]], <4 x float> [[V2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x float> [[RES]]
+;
+  %x0 = load <2 x float>, ptr %x
+  %x_off = getelementptr i8, ptr %x, i64 8
+  %x1 = load <2 x float>, ptr %x_off
+  %y0 = load <2 x float>, ptr %y
+  %y_off = getelementptr i8, ptr %y, i64 8
+  %y1 = load <2 x float>, ptr %y_off
+  %v1 = shufflevector <2 x float> %x0, <2 x float> %x1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v2 = shufflevector <2 x float> %y0, <2 x float> %y1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v1w = shufflevector <4 x float> %v1, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+  %v2w = shufflevector <4 x float> %v2, <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+  %res = shufflevector <8 x float> %v1w, <8 x float> %v2w, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  ret <8 x float> %res
+}
+
+define <4 x float> @test_float_3_to_1(ptr %p) {
+; CHECK-LABEL: define <4 x float> @test_float_3_to_1(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x float>, ptr [[P]], align 8
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x float>, ptr [[P1]], align 4
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT:    [[L2:%.*]] = load <2 x float>, ptr [[P2]], align 4
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x float> [[L0]], <2 x float> [[L1]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[V1]], <2 x float> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x float> [[RES]]
+;
+  %L0 = load <2 x float>, ptr %p, align 8
+  %p1 = getelementptr i8, ptr %p, i64 4
+  %L1 = load <2 x float>, ptr %p1, align 4
+  %p2 = getelementptr i8, ptr %p, i64 8
+  %L2 = load <2 x float>, ptr %p2, align 4
+  %v1 = shufflevector <2 x float> %L0, <2 x float> %L1, <2 x i32> <i32 0, i32 3>
+  %res = shufflevector <2 x float> %v1, <2 x float> %L2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %res
+}
+
+; i64
+define <4 x i64> @test_i64_4_to_2(ptr %x, ptr %y) {
+; CHECK-LABEL: define <4 x i64> @test_i64_4_to_2(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x i64>, ptr [[X]], align 16
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 16
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x i64>, ptr [[XA]], align 16
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i64>, ptr [[Y]], align 16
+; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 16
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i64>, ptr [[YA]], align 16
+; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i64> [[X0]], <2 x i64> [[X1]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i64> [[Y0]], <2 x i64> [[Y1]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i64> [[VX]], <4 x i64> [[VY]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT:    ret <4 x i64> [[RES]]
+;
+  %x0 = load <2 x i64>, ptr %x, align 16
+  %xa = getelementptr i8, ptr %x, i64 16
+  %x1 = load <2 x i64>, ptr %xa, align 16
+  %y0 = load <2 x i64>, ptr %y, align 16
+  %ya = getelementptr i8, ptr %y, i64 16
+  %y1 = load <2 x i64>, ptr %ya, align 16
+  %vx = shufflevector <2 x i64> %x0, <2 x i64> %x1, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  %vy = shufflevector <2 x i64> %y0, <2 x i64> %y1, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  %res = shufflevector <4 x i64> %vx, <4 x i64> %vy, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  ret <4 x i64> %res
+}
+
+define <4 x i64> @test_i64_3_to_1_success(ptr %p) {
+; CHECK-LABEL: define <4 x i64> @test_i64_3_to_1_success(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x i64>, ptr [[P]], align 16
+; CHECK-NEXT:    [[PA:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x i64>, ptr [[PA]], align 8
+; CHECK-NEXT:    [[PB:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L2:%.*]] = load <2 x i64>, ptr [[PB]], align 16
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i64> [[L0]], <2 x i64> [[L1]], <2 x i32> <i32 1, i32 2>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x i64> [[V1]], <2 x i64> [[L2]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    ret <4 x i64> [[RES]]
+;
+  %L0 = load <2 x i64>, ptr %p, align 16
+  %pa = getelementptr i8, ptr %p, i64 8
+  %L1 = load <2 x i64>, ptr %pa, align 8
+  %pb = getelementptr i8, ptr %p, i64 16
+  %L2 = load <2 x i64>, ptr %pb, align 16
+  %v1 = shufflevector <2 x i64> %L0, <2 x i64> %L1, <2 x i32> <i32 1, i32 2>
+  %res = shufflevector <2 x i64> %v1, <2 x i64> %L2, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  ret <4 x i64> %res
+}
+
+; i32
+define <4 x i32> @test_i32_4_to_2_success(ptr %x, ptr %y) {
+; CHECK-LABEL: define <4 x i32> @test_i32_4_to_2_success(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT:    ret <4 x i32> [[RES]]
+;
+  %x0 = load <2 x i32>, ptr %x, align 8
+  %xa = getelementptr i8, ptr %x, i64 8
+  %x1 = load <2 x i32>, ptr %xa, align 8
+  %y0 = load <2 x i32>, ptr %y, align 8
+  %ya = getelementptr i8, ptr %y, i64 8
+  %y1 = load <2 x i32>, ptr %ya, align 8
+  %vx = shufflevector <2 x i32> %x0, <2 x i32> %x1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %vy = shufflevector <2 x i32> %y0, <2 x i32> %y1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x i32> %vx, <4 x i32> %vy, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_i32_3_to_1(ptr %p) {
+; CHECK-LABEL: define <4 x i32> @test_i32_3_to_1(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x i32>, ptr [[P]], align 8
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x i32>, ptr [[P1]], align 4
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT:    [[L2:%.*]] = load <2 x i32>, ptr [[P2]], align 8
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i32> [[L0]], <2 x i32> [[L1]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x i32> [[V1]], <2 x i32> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x i32> [[RES]]
+;
+  %L0 = load <2 x i32>, ptr %p, align 8
+  %p1 = getelementptr i8, ptr %p, i64 4
+  %L1 = load <2 x i32>, ptr %p1, align 4
+  %p2 = getelementptr i8, ptr %p, i64 8
+  %L2 = load <2 x i32>, ptr %p2, align 8
+  %v1 = shufflevector <2 x i32> %L0, <2 x i32> %L1, <2 x i32> <i32 0, i32 3>
+  %res = shufflevector <2 x i32> %v1, <2 x i32> %L2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %res
+}
+
+; i16
+define <8 x i16> @test_i16_4_to_2(ptr %x, ptr %y) {
+; CHECK-LABEL: define <8 x i16> @test_i16_4_to_2(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x i16>, ptr [[X]], align 4
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 4
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x i16>, ptr [[XA]], align 4
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i16>, ptr [[Y]], align 4
+; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 4
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i16>, ptr [[YA]], align 4
+; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i16> [[X0]], <2 x i16> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i16> [[Y0]], <2 x i16> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i16> [[VX]], <4 x i16> [[VY]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    ret <8 x i16> [[RES]]
+;
+  %x0 = load <2 x i16>, ptr %x, align 4
+  %xa = getelementptr i8, ptr %x, i64 4
+  %x1 = load <2 x i16>, ptr %xa, align 4
+  %y0 = load <2 x i16>, ptr %y, align 4
+  %ya = getelementptr i8, ptr %y, i64 4
+  %y1 = load <2 x i16>, ptr %ya, align 4
+  %vx = shufflevector <2 x i16> %x0, <2 x i16> %x1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %vy = shufflevector <2 x i16> %y0, <2 x i16> %y1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x i16> %vx, <4 x i16> %vy, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_i16_3_to_1(ptr %p) {
+; CHECK-LABEL: define <8 x i16> @test_i16_3_to_1(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x i16>, ptr [[P]], align 4
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x i16>, ptr [[P1]], align 4
+; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; CHECK-NEXT:    [[L2:%.*]] = load <4 x i16>, ptr [[P2]], align 8
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i16> [[L0]], <2 x i16> [[L1]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i16> [[V1]], <4 x i16> [[L2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6>
+; CHECK-NEXT:    ret <8 x i16> [[RES]]
+;
+  %L0 = load <2 x i16>, ptr %p, align 4
+  %p1 = getelementptr i8, ptr %p, i64 4
+  %L1 = load <2 x i16>, ptr %p1, align 4
+  %p2 = getelementptr i8, ptr %p, i64 8
+  %L2 = load <4 x i16>, ptr %p2, align 8
+  %v1 = shufflevector <2 x i16> %L0, <2 x i16> %L1, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  %res = shufflevector <4 x i16> %v1, <4 x i16> %L2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6>
+  ret <8 x i16> %res
+}
+
+; ============================================================================
+; Edge Cases & Safety
+; ============================================================================
+
+define <8 x float> @test_19_dead_lanes(ptr %p) {
+; CHECK-LABEL: define <8 x float> @test_19_dead_lanes(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L1:%.*]] = load <4 x float>, ptr [[P]], align 16
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 32
+; CHECK-NEXT:    [[L2:%.*]] = load <4 x float>, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[L1]], <4 x float> [[L2]], <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    ret <8 x float> [[RES]]
+;
+  %l1 = load <4 x float>, ptr %p
+  %gep = getelementptr inbounds i8, ptr %p, i64 32
+  %l2 = load <4 x float>, ptr %gep
+  %res = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 poison, i32 poison, i32 poison, i32 poison>
+  ret <8 x float> %res
+}
+
+; ====================================================================================
+; Negative
+; ====================================================================================
+
+; Scalar Type Mismatch
+define <4 x i32> @test_neg_trunc_mismatch(ptr %p) {
+; CHECK-LABEL: define <4 x i32> @test_neg_trunc_mismatch(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x i64>, ptr [[P]], align 16
+; CHECK-NEXT:    [[T0:%.*]] = trunc <2 x i64> [[L0]] to <2 x i32>
+; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L1:%.*]] = load <2 x i32>, ptr [[P1]], align 8
+; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i32> [[T0]], <2 x i32> [[L1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x i32> [[V1]]
+;
+  %L0 = load <2 x i64>, ptr %p, align 16
+  %T0 = trunc <2 x i64> %L0 to <2 x i32>
+  %p1 = getelementptr i8, ptr %p, i64 16
+  %L1 = load <2 x i32>, ptr %p1, align 8
+  %v1 = shufflevector <2 x i32> %T0, <2 x i32> %L1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %v1
+}
+
+; Multi-use (hasOneUse() == false)
+define <4 x i32> @test_neg_multi_use(ptr %x, ptr %y) {
+; CHECK-LABEL: define <4 x i32> @test_neg_multi_use(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; CHECK-NEXT:    call void @use(<2 x i32> [[X0]])
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    ret <4 x i32> [[RES]]
+;
+  %x0 = load <2 x i32>, ptr %x, align 8
+  call void @use(<2 x i32> %x0)
+  %xa = getelementptr i8, ptr %x, i64 8
+  %x1 = load <2 x i32>, ptr %xa, align 8
+  %y0 = load <2 x i32>, ptr %y, align 8
+  %ya = getelementptr i8, ptr %y, i64 8
+  %y1 = load <2 x i32>, ptr %ya, align 8
+  %vx = shufflevector <2 x i32> %x0, <2 x i32> %x1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  %vy = shufflevector <2 x i32> %y0, <2 x i32> %y1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  %res = shufflevector <4 x i32> %vx, <4 x i32> %vy, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x i32> %res
+}
+
+; Misaligned (Offset % EltSize != 0)
+define <4 x i32> @test_neg_misaligned(ptr %x) {
+; CHECK-LABEL: define <4 x i32> @test_neg_misaligned(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 2
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 1
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x i32> [[RES]]
+;
+  %x0 = load <2 x i32>, ptr %x, align 8
+  %xa = getelementptr i8, ptr %x, i64 2
+  %x1 = load <2 x i32>, ptr %xa, align 1
+  %res = shufflevector <2 x i32> %x0, <2 x i32> %x1, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
+  ret <4 x i32> %res
+}
+
+; Not a Load (Source is Arithmetic)
+define <4 x float> @test_neg_not_a_load(ptr %x) {
+; CHECK-LABEL: define <4 x float> @test_neg_not_a_load(
+; CHECK-SAME: ptr [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[X0:%.*]] = load <2 x float>, ptr [[X]], align 8
+; CHECK-NEXT:    [[V_ADD:%.*]] = fadd <2 x float> [[X0]], splat (float 1.000000e+00)
+; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; CHECK-NEXT:    [[X1:%.*]] = load <2 x float>, ptr [[XA]], align 8
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[V_ADD]], <2 x float> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    ret <4 x float> [[RES]]
+;
+  %x0 = load <2 x float>, ptr %x, align 8
+  %v_add = fadd <2 x float> %x0, <float 1.0, float 1.0>
+  %xa = getelementptr i8, ptr %x, i64 8
+  %x1 = load <2 x float>, ptr %xa, align 8
+  %res = shufflevector <2 x float> %v_add, <2 x float> %x1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %res
+}
+
+; Too Many Bases (Bases > 2)
+define <4 x i32> @test_neg_too_many_bases(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: define <4 x i32> @test_neg_too_many_bases(
+; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]], ptr [[Z:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[LX:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; CHECK-NEXT:    [[LY:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; CHECK-NEXT:    [[LZ:%.*]] = load <2 x i32>, ptr [[Z]], align 8
+; CHECK-NEXT:    [[V_XY:%.*]] = shufflevector <2 x i32> [[LX]], <2 x i32> [[LY]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[V_Z:%.*]] = shufflevector <2 x i32> [[LZ]], <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[V_XY]], <4 x i32> [[V_Z]], <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+; CHECK-NEXT:    ret <4 x i32> [[RES]]
+;
+  %lx = load <2 x i32>, ptr %x, align 8
+  %ly = load <2 x i32>, ptr %y, align 8
+  %lz = load <2 x i32>, ptr %z, align 8
+  %v_xy = shufflevector <2 x i32> %lx, <2 x i32> %ly, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v_z = shufflevector <2 x i32> %lz, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+  %res = shufflevector <4 x i32> %v_xy, <4 x i32> %v_z, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
+  ret <4 x i32> %res
+}
+
+define <8 x double> @test_widen_2xf64_with_poison(ptr %p) {
+; CHECK-LABEL: define <8 x double> @test_widen_2xf64_with_poison(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L_BIG:%.*]] = load <4 x double>, ptr [[P]], align 32
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 64
+; CHECK-NEXT:    [[L_SMALL:%.*]] = load <2 x double>, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[L_SMALL_PAD:%.*]] = shufflevector <2 x double> [[L_SMALL]], <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x double> [[L_BIG]], <4 x double> [[L_SMALL_PAD]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    ret <8 x double> [[RES]]
+;
+  %l_big = load <4 x double>, ptr %p, align 32
+  %gep = getelementptr inbounds i8, ptr %p, i64 64
+  %l_small = load <2 x double>, ptr %gep, align 16
+  %l_small_pad = shufflevector <2 x double> %l_small, <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+  %res = shufflevector <4 x double> %l_big, <4 x double> %l_small_pad, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %res
+}
+
+define <32 x i8> @test_widen_8xi8_with_poison(ptr %p) {
+; CHECK-LABEL: define <32 x i8> @test_widen_8xi8_with_poison(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[L_SMALL:%.*]] = load <8 x i8>, ptr [[P]], align 8
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 16
+; CHECK-NEXT:    [[L_BIG:%.*]] = load <16 x i8>, ptr [[GEP]], align 16
+; CHECK-NEXT:    [[L_SMALL_PAD:%.*]] = shufflevector <8 x i8> [[L_SMALL]], <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <16 x i8> [[L_SMALL_PAD]], <16 x i8> [[L_BIG]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    ret <32 x i8> [[RES]]
+;
+  %l_small = load <8 x i8>, ptr %p, align 8
+  %gep = getelementptr inbounds i8, ptr %p, i64 16
+  %l_big = load <16 x i8>, ptr %gep, align 16
+  %l_small_pad = shufflevector <8 x i8> %l_small, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+  %res = shufflevector <16 x i8> %l_small_pad, <16 x i8> %l_big, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+  ret <32 x i8> %res
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX: {{.*}}
+; SSE: {{.*}}

From 63594f40a4e75393ef7dcd1c3fc02e096c46ee18 Mon Sep 17 00:00:00 2001
From: Hanbum Park <kese111 at gmail.com>
Date: Tue, 20 Jan 2026 05:56:11 +0900
Subject: [PATCH 2/2] [VectorCombine] Consolidate fragmented loads from shuffle
 chains into wide loads

LLVM currently lacks a dedicated optimization to handle scenarios
where a wide vector is constructed incrementally from multiple narrow
loads (e.g., scalars or <2 x T>) originating from adjacent memory
addresses. Without this transformation, LLVM generates a series of
serialized shufflevector instructions that merge these small loads
one by one. This results in unnecessarily deep shuffle trees and
fragmented memory operations in the IR, even when the data could
be grouped more efficiently.

By recognizing that these fragmented loads often share a common base
pointer and reside within a small memory range, we can reorganize the
shuffle structure for better performance.

This patch introduces foldShuffleOfFragmentedLoads to VectorCombine.
The optimization identifies these patterns by tracing lane origins
and collapses the complex, multi-level shuffle tree into a clean
two-level hierarchy. It groups the fragmented loads into intermediate
"wide buffers" for each base pointer and replaces the entire chain
with a single final selection shuffle.

Proof: https://alive2.llvm.org/ce/z/Q6Yn-p
Fixes: #140234
---
 .../Transforms/Vectorize/VectorCombine.cpp    | 342 ++++++++++++++++++
 .../VectorCombine/AArch64/select-shuffle.ll   |   3 +-
 .../X86/shuffle-of-fragment-loads.ll          | 245 ++++++++-----
 3 files changed, 504 insertions(+), 86 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 708763a7eb982..67bb74c367ab6 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -140,6 +140,7 @@ class VectorCombine {
   bool foldShuffleOfSelects(Instruction &I);
   bool foldShuffleOfCastops(Instruction &I);
   bool foldShuffleOfShuffles(Instruction &I);
+  bool foldShuffleOfFragmentedLoads(Instruction &I);
   bool foldPermuteOfIntrinsic(Instruction &I);
   bool foldShufflesOfLengthChangingShuffles(Instruction &I);
   bool foldShuffleOfIntrinsics(Instruction &I);
@@ -5069,6 +5070,345 @@ bool VectorCombine::shrinkPhiOfShuffles(Instruction &I) {
   return true;
 }
 
+/// Returns the index of the element within the given load \p L that corresponds
+/// to the \p TargetBit offset. Returns -1 if the data is out of the load's
+/// range or if it is not aligned with the element boundaries.
+static int getIdxInLoad(LoadInst *L, int64_t TargetBit, unsigned ElementSize,
+                        const DataLayout &DL) {
+  if (!L)
+    return -1;
+
+  int64_t LOffBytes;
+  GetPointerBaseWithConstantOffset(L->getPointerOperand(), LOffBytes, DL);
+  int64_t LOffBits = LOffBytes * 8; // Convert to bit-level offset.
+  uint64_t LSizeBits = DL.getTypeStoreSizeInBits(L->getType());
+  // Check if the target range is fully contained within the load.
+  if (TargetBit >= LOffBits &&
+      (TargetBit + ElementSize) <= (LOffBits + (int64_t)LSizeBits)) {
+    // The target must start at a valid element boundary to be addressable.
+    if ((TargetBit - LOffBits) % ElementSize == 0)
+      return (int)((TargetBit - LOffBits) / ElementSize);
+  }
+
+  return -1;
+}
+
+/// Estimate the cost of a shuffle by mapping the mask to a \c ShuffleKind.
+/// Queries \p TTI using the identified pattern and provided operands.
+static InstructionCost getShuffleCost(ArrayRef<int> Mask, VectorType *RetTy,
+                                      VectorType *SrcTy, TTI::TargetCostKind CK,
+                                      const TargetTransformInfo &TTI,
+                                      Value *Op0 = nullptr,
+                                      Value *Op1 = nullptr,
+                                      const Instruction *I = nullptr) {
+  unsigned SrcNum = cast<FixedVectorType>(SrcTy)->getNumElements();
+  TargetTransformInfo::ShuffleKind Kind = TargetTransformInfo::SK_PermuteTwoSrc;
+  if (ShuffleVectorInst::isSingleSourceMask(Mask, SrcNum)) {
+    if (ShuffleVectorInst::isZeroEltSplatMask(Mask, SrcNum))
+      Kind = TargetTransformInfo::SK_Broadcast;
+    else if (ShuffleVectorInst::isReverseMask(Mask, SrcNum))
+      Kind = TargetTransformInfo::SK_Reverse;
+    else
+      Kind = TargetTransformInfo::SK_PermuteSingleSrc;
+  } else {
+    if (ShuffleVectorInst::isSelectMask(Mask, SrcNum))
+      Kind = TargetTransformInfo::SK_Select;
+    else if (ShuffleVectorInst::isTransposeMask(Mask, SrcNum))
+      Kind = TargetTransformInfo::SK_Transpose;
+  }
+
+  SmallVector<const Value *> Args = {};
+  if (Op0 && Op1) {
+    Args.push_back(Op0);
+    Args.push_back(Op1);
+  }
+  InstructionCost Cost =
+      TTI.getShuffleCost(Kind, RetTy, SrcTy, Mask, CK, 0, nullptr, Args, I);
+  return Cost;
+}
+
+struct LaneOrigin {
+  LoadInst *LI = nullptr;
+  int64_t OffsetInBits = 0;
+  Value *BasePtr = nullptr;
+  unsigned IndexInLI = 0;
+};
+
+struct ShufflePlan {
+  LoadInst *A = nullptr;
+  LoadInst *B = nullptr;
+  SmallVector<int, 16> ConcatMask;
+  bool NeedsPromotion = false;
+  FixedVectorType *PromotionTargetTy = nullptr;
+  Value *ResultV = nullptr;
+};
+
+/// Consolidate a tree of shuffles fed by fragmented narrow loads from adjacent
+/// memory addresses into a structured two-level shuffle hierarchy.
+///
+/// This transformation flattens deep "incremental build" shuffle trees by
+/// grouping lane origins by their base pointers. It constructs intermediate
+/// buffers for each base, where each lane is pre-positioned to match its
+/// final destination in the result vector.
+///
+/// Example Source (Incremental Build):
+///   %0 = load <2 x d>, ptr %x      ; Mem: [x+0, x+8]
+///   %1 = load <2 x d>, ptr %y      ; Mem: [y+0, y+8]
+///   %v1 = shuffle %0, %1, <1, 2>   ; Reg: [x[1], y[0]] (Lanes 0, 1)
+///   %2 = load <2 x d>, ptr %x+16   ; Mem: [x+16, x+24]
+///   %v2 = shuffle %v1, %2, <0, 1, 3> ; Reg: [x[1], y[0], x[3]] (Lanes 0, 1, 2)
+///   %3 = load <2 x d>, ptr %y+16   ; Mem: [y+16, y+24]
+///   %res = shuffle %v2, %3, <0, 1, 2, 4> ; Result: [x[1], y[0], x[3], y[2]]
+///
+/// Memory View of Final Result:
+///   Lane 0: BaseX + 8B  (x[1])
+///   Lane 1: BaseY + 0B  (y[0])
+///   Lane 2: BaseX + 24B (x[3])
+///   Lane 3: BaseY + 16B (y[2])
+///
+/// Transformed Structure (Mapping-Sync Buffers):
+///   ; Each buffer lane i matches result lane i if the base pointer matches.
+///   %buf_x = shuffle %0, %2, <1, u, 3, u>    ; Reg: [x[1], undef, x[3], undef]
+///   %buf_y = shuffle %1, %3, <u, 0, u, 2>    ; Reg: [undef, y[0], undef, y[2]]
+///   %res   = shuffle %buf_x, %buf_y, <0, 5, 2, 7> ; Final Selection
+///
+/// This avoids unsafe speculative load widening by utilizing existing narrow
+/// loads while significantly reducing shuffle latency and tree depth.
+bool VectorCombine::foldShuffleOfFragmentedLoads(Instruction &I) {
+  auto *VT = dyn_cast<FixedVectorType>(I.getType());
+  if (!VT || I.use_empty())
+    return false;
+
+  unsigned NumElts = VT->getNumElements();
+  Type *EltTy = VT->getElementType();
+  unsigned ElementSize = DL->getTypeSizeInBits(EltTy);
+  SmallVector<LaneOrigin, 4> Origins(NumElts);
+  SmallVector<Value *, 2> Bases;
+  SmallPtrSet<Instruction *, 8> VisitedShuffles;
+  for (unsigned Lane = 0; Lane < NumElts; ++Lane) {
+    Value *V = &I;
+    unsigned CurrLane = Lane;
+    while (auto *SVI = dyn_cast<ShuffleVectorInst>(V)) {
+      // Collect unique shuffle instructions in the original tree to accurately
+      // estimate the original cost without double-counting.
+      VisitedShuffles.insert(SVI);
+      int M = SVI->getMaskValue(CurrLane);
+      if (M < 0) {
+        V = nullptr;
+        break;
+      }
+      unsigned NumOp0 = cast<FixedVectorType>(SVI->getOperand(0)->getType())
+                            ->getNumElements();
+      if ((unsigned)M < NumOp0) {
+        V = SVI->getOperand(0);
+        CurrLane = M;
+      } else {
+        V = SVI->getOperand(1);
+        CurrLane = M - NumOp0;
+      }
+    }
+    if (!V)
+      continue;
+
+    auto *LI = dyn_cast<LoadInst>(V);
+    if (!LI || LI->getType()->getScalarType() != EltTy)
+      return false;
+
+    int64_t ConstOff = 0;
+    Value *Base = GetPointerBaseWithConstantOffset(LI->getPointerOperand(),
+                                                   ConstOff, *DL);
+    if (!Base)
+      return false;
+
+    int64_t OffsetInBits = (ConstOff * 8) + (int64_t)(CurrLane * ElementSize);
+    Origins[Lane] = {LI, OffsetInBits, Base, (unsigned)CurrLane};
+    if (!is_contained(Bases, Base)) {
+      if (Bases.size() >= 2)
+        return false;
+      Bases.push_back(Base);
+    }
+  }
+
+  auto CanWidenSafety = [&](LoadInst *L, FixedVectorType *TargetTy, int BIdx) {
+    if (cast<FixedVectorType>(L->getType()) == TargetTy)
+      return true;
+
+    // Determinism Guard: Abort if any lane associated with this base is
+    // indeterminate (poison/undef). This ensures that we do not widen
+    // memory accesses for uncertain references.
+    for (unsigned I = 0; I < NumElts; ++I)
+      if (Origins[I].BasePtr == Bases[BIdx] && !Origins[I].LI)
+        return false;
+
+    // Verify that all elements actually utilized by the final shuffle result
+    // are deterministic. Widening is disallowed if the root shuffle mask
+    // references a lane that originates from a poison value in the source tree.
+    auto *SVI = cast<ShuffleVectorInst>(&I);
+    for (unsigned I = 0; I < NumElts; ++I) {
+      int M = SVI->getMaskValue(I);
+      if (M < 0)
+        continue;
+
+      // Determine if the mask index points to the current base being processed.
+      unsigned TargetBaseIdx = (unsigned)M < NumElts ? 0 : 1;
+      if (TargetBaseIdx == (unsigned)BIdx)
+        if (!Origins[I].LI)
+          return false;
+    }
+
+    return canWidenLoad(L, TTI);
+  };
+
+  SmallVector<ShufflePlan, 2> BasePlans;
+  for (unsigned BIdx = 0; BIdx < Bases.size(); ++BIdx) {
+    Value *Base = Bases[BIdx];
+    // Collect required offsets and candidate loads for this base.
+    SmallVector<LoadInst *, 8> Candidates;
+    SmallVector<int64_t, 8> NeededOffsets;
+    for (const auto &O : Origins) {
+      if (O.BasePtr == Base && O.LI) {
+        if (!is_contained(Candidates, O.LI))
+          Candidates.push_back(O.LI);
+        if (!is_contained(NeededOffsets, O.OffsetInBits))
+          NeededOffsets.push_back(O.OffsetInBits);
+      }
+    }
+
+    // Find a pair of loads that collectively cover all required lanes.
+    LoadInst *LI_A = nullptr, *LI_B = nullptr;
+    for (unsigned I = 0; I < Candidates.size(); ++I) {
+      for (unsigned J = I; J < Candidates.size(); ++J) {
+        auto *L1 = Candidates[I], *L2 = Candidates[J];
+        auto IsCovered = [&](int64_t Off) {
+          return getIdxInLoad(L1, Off, ElementSize, *DL) != -1 ||
+                 getIdxInLoad(L2, Off, ElementSize, *DL) != -1;
+        };
+        if (all_of(NeededOffsets, IsCovered)) {
+          LI_A = L1;
+          LI_B = L2;
+          break;
+        }
+      }
+      if (LI_A)
+        break;
+    }
+    if (!LI_A)
+      return false;
+
+    ShufflePlan Plan;
+    Plan.A = LI_A;
+    Plan.B = (LI_B != LI_A) ? LI_B : nullptr;
+    // Check if the types of the chosen loads match. If not, determine if we
+    // can safely widen the narrower load to the larger target type to
+    // consolidate the memory layout.
+    auto *TyA = cast<FixedVectorType>(Plan.A->getType());
+    if (Plan.B) {
+      auto *TyB = cast<FixedVectorType>(Plan.B->getType());
+      if (TyA != TyB) {
+        auto *TargetTy =
+            (DL->getTypeSizeInBits(TyA) > DL->getTypeSizeInBits(TyB)) ? TyA
+                                                                      : TyB;
+        if (!CanWidenSafety(Plan.A, TargetTy, BIdx) ||
+            !CanWidenSafety(Plan.B, TargetTy, BIdx))
+          return false;
+        Plan.NeedsPromotion = true;
+        Plan.PromotionTargetTy = TargetTy;
+      }
+    }
+
+    // Create the buffer mask where lane i matches final result lane i.
+    Plan.ConcatMask.assign(NumElts, -1);
+    unsigned CommonNum = Plan.PromotionTargetTy
+                             ? Plan.PromotionTargetTy->getNumElements()
+                             : TyA->getNumElements();
+    for (unsigned I = 0; I < NumElts; ++I) {
+      if (Origins[I].BasePtr != Base)
+        continue;
+      int Idx = getIdxInLoad(Plan.A, Origins[I].OffsetInBits, ElementSize, *DL);
+      if (Idx == -1 && Plan.B) {
+        int IdxInB =
+            getIdxInLoad(Plan.B, Origins[I].OffsetInBits, ElementSize, *DL);
+        if (IdxInB != -1)
+          Idx = IdxInB + CommonNum;
+      }
+      Plan.ConcatMask[I] = Idx;
+    }
+    BasePlans.push_back(Plan);
+  }
+
+  InstructionCost OldCost = 0, NewCost = 0;
+  // Calculate the total cost of existing shuffles in the tree.
+  for (Instruction *OldI : VisitedShuffles) {
+    auto *SVI = cast<ShuffleVectorInst>(OldI);
+    OldCost += getShuffleCost(
+        SVI->getShuffleMask(), cast<VectorType>(SVI->getType()),
+        cast<VectorType>(SVI->getOperand(0)->getType()), CostKind, TTI,
+        SVI->getOperand(0), SVI->getOperand(1), SVI);
+  }
+
+  // Pre-calculate the final selection mask from intermediate buffers.
+  SmallVector<int, 16> FinalMask(NumElts, -1);
+  for (unsigned I = 0; I < NumElts; ++I) {
+    if (Origins[I].LI) {
+      unsigned BIdx = (Origins[I].BasePtr == Bases[0]) ? 0 : 1;
+      FinalMask[I] = I + (BIdx * NumElts);
+    }
+  }
+
+  // Aggregate costs for the proposed plan: load widening and buffer shuffles.
+  for (unsigned BIdx = 0; BIdx < BasePlans.size(); ++BIdx) {
+    const auto &P = BasePlans[BIdx];
+    auto *SrcVTy = P.PromotionTargetTy ? cast<VectorType>(P.PromotionTargetTy)
+                                       : cast<VectorType>(P.A->getType());
+    // Add cost of memory access if widening is required.
+    if (P.NeedsPromotion)
+      NewCost += TTI.getMemoryOpCost(Instruction::Load, P.PromotionTargetTy,
+                                     P.A->getAlign(),
+                                     P.A->getPointerAddressSpace(), CostKind);
+
+    // Estimate the cost of intermediate buffer assembly.
+    NewCost +=
+        getShuffleCost(P.ConcatMask, VT, SrcVTy, CostKind, TTI, P.A, P.B);
+  }
+
+  // Add the cost of the final selection shuffle.
+  NewCost += getShuffleCost(FinalMask, VT, VT, CostKind, TTI, nullptr, nullptr);
+  LLVM_DEBUG(dbgs() << "Found a shuffle tree of fragmented loads: " << I
+                    << "\n  OldCost: " << OldCost << " vs NewCost: " << NewCost
+                    << "\n");
+  if (NewCost >= OldCost)
+    return false;
+
+  for (auto &P : BasePlans) {
+    Value *OpA = P.A, *OpB = P.B;
+    // Helper to widen a narrow load by creating a new load at the same pointer.
+    auto Widen = [&](LoadInst *OldL) {
+      auto *NewL = Builder.CreateAlignedLoad(
+          P.PromotionTargetTy, OldL->getPointerOperand(), OldL->getAlign());
+      NewL->takeName(OldL);
+      return NewL;
+    };
+    // If planned, perform memory widening to match the common buffer type.
+    if (P.NeedsPromotion) {
+      if (cast<FixedVectorType>(OpA->getType()) != P.PromotionTargetTy)
+        OpA = Widen(cast<LoadInst>(OpA));
+      if (OpB && cast<FixedVectorType>(OpB->getType()) != P.PromotionTargetTy)
+        OpB = Widen(cast<LoadInst>(OpB));
+    }
+    // Create the intermediate buffer for this base pointer.
+    P.ResultV = Builder.CreateShuffleVector(
+        OpA, OpB ? OpB : PoisonValue::get(OpA->getType()), P.ConcatMask);
+  }
+
+  // Combine the base-specific buffers into the final result.
+  Value *V0 = BasePlans[0].ResultV, *V1 = (BasePlans.size() > 1)
+                                              ? BasePlans[1].ResultV
+                                              : PoisonValue::get(VT);
+  Value *FinalResult = Builder.CreateShuffleVector(V0, V1, FinalMask);
+  replaceValue(I, *FinalResult);
+  return true;
+}
+
 /// This is the entry point for all transforms. Pass manager differences are
 /// handled in the callers of this function.
 bool VectorCombine::run() {
@@ -5166,6 +5506,8 @@ bool VectorCombine::run() {
           return true;
         if (foldShuffleToIdentity(I))
           return true;
+        if (foldShuffleOfFragmentedLoads(I))
+          return true;
         break;
       case Instruction::Load:
         if (shrinkLoadForShuffles(I))
diff --git a/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll b/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
index 3a3ba74663b93..26ef07ec67f0c 100644
--- a/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
+++ b/llvm/test/Transforms/VectorCombine/AArch64/select-shuffle.ll
@@ -463,7 +463,8 @@ define void @test_1652048214(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    [[G1:%.*]] = getelementptr <8 x i32>, ptr [[SRC]], i32 1
 ; CHECK-NEXT:    [[L1:%.*]] = load <8 x i32>, ptr [[G1]], align 32
 ; CHECK-NEXT:    [[S0:%.*]] = shufflevector <8 x i32> [[L1]], <8 x i32> [[L0]], <4 x i32> <i32 6, i32 2, i32 14, i32 6>
-; CHECK-NEXT:    [[S1:%.*]] = shufflevector <8 x i32> [[L0]], <8 x i32> [[L0]], <4 x i32> <i32 9, i32 5, i32 1, i32 0>
+; CHECK-NEXT:    [[V_BASE_BUFFER:%.*]] = shufflevector <8 x i32> [[L0]], <8 x i32> poison, <4 x i32> <i32 1, i32 5, i32 1, i32 0>
+; CHECK-NEXT:    [[S1:%.*]] = shufflevector <4 x i32> [[V_BASE_BUFFER]], <4 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[ADD:%.*]] = add <4 x i32> [[S1]], [[S1]]
 ; CHECK-NEXT:    [[SUB:%.*]] = sub <4 x i32> [[S0]], [[S1]]
 ; CHECK-NEXT:    [[T0:%.*]] = shufflevector <4 x i32> [[SUB]], <4 x i32> [[ADD]], <4 x i32> <i32 1, i32 5, i32 7, i32 3>
diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll
index 858ddeb2e2943..947bca276d6d0 100644
--- a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-fragment-loads.ll
@@ -79,18 +79,31 @@ define <4 x float> @test_single_load(ptr %p) {
 ; ====================================================================================
 
 define <4 x double> @test_double_4_to_2(ptr %x, ptr %y) {
-; CHECK-LABEL: define <4 x double> @test_double_4_to_2(
-; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[X0:%.*]] = load <2 x double>, ptr [[X]], align 16
-; CHECK-NEXT:    [[X_OFF:%.*]] = getelementptr i8, ptr [[X]], i64 16
-; CHECK-NEXT:    [[X1:%.*]] = load <2 x double>, ptr [[X_OFF]], align 16
-; CHECK-NEXT:    [[Y0:%.*]] = load <2 x double>, ptr [[Y]], align 16
-; CHECK-NEXT:    [[Y_OFF:%.*]] = getelementptr i8, ptr [[Y]], i64 16
-; CHECK-NEXT:    [[Y1:%.*]] = load <2 x double>, ptr [[Y_OFF]], align 16
-; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x double> [[X0]], <2 x double> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[V2:%.*]] = shufflevector <2 x double> [[Y0]], <2 x double> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x double> [[V1]], <4 x double> [[V2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NEXT:    ret <4 x double> [[RES]]
+; SSE-LABEL: define <4 x double> @test_double_4_to_2(
+; SSE-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[X0:%.*]] = load <2 x double>, ptr [[X]], align 16
+; SSE-NEXT:    [[X_OFF:%.*]] = getelementptr i8, ptr [[X]], i64 16
+; SSE-NEXT:    [[X1:%.*]] = load <2 x double>, ptr [[X_OFF]], align 16
+; SSE-NEXT:    [[Y0:%.*]] = load <2 x double>, ptr [[Y]], align 16
+; SSE-NEXT:    [[Y_OFF:%.*]] = getelementptr i8, ptr [[Y]], i64 16
+; SSE-NEXT:    [[Y1:%.*]] = load <2 x double>, ptr [[Y_OFF]], align 16
+; SSE-NEXT:    [[V1:%.*]] = shufflevector <2 x double> [[X0]], <2 x double> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[V2:%.*]] = shufflevector <2 x double> [[Y0]], <2 x double> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <4 x double> [[V1]], <4 x double> [[V2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; SSE-NEXT:    ret <4 x double> [[RES]]
+;
+; AVX-LABEL: define <4 x double> @test_double_4_to_2(
+; AVX-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[X0:%.*]] = load <2 x double>, ptr [[X]], align 16
+; AVX-NEXT:    [[X_OFF:%.*]] = getelementptr i8, ptr [[X]], i64 16
+; AVX-NEXT:    [[X1:%.*]] = load <2 x double>, ptr [[X_OFF]], align 16
+; AVX-NEXT:    [[Y0:%.*]] = load <2 x double>, ptr [[Y]], align 16
+; AVX-NEXT:    [[Y_OFF:%.*]] = getelementptr i8, ptr [[Y]], i64 16
+; AVX-NEXT:    [[Y1:%.*]] = load <2 x double>, ptr [[Y_OFF]], align 16
+; AVX-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> [[X0]], <2 x double> [[X1]], <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
+; AVX-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[Y0]], <2 x double> [[Y1]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 2>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; AVX-NEXT:    ret <4 x double> [[RES]]
 ;
   %x0 = load <2 x double>, ptr %x
   %x_off = getelementptr i8, ptr %x, i64 16
@@ -156,16 +169,24 @@ define <8 x float> @test_float_4_to_2(ptr %x, ptr %y) {
 }
 
 define <4 x float> @test_float_3_to_1(ptr %p) {
-; CHECK-LABEL: define <4 x float> @test_float_3_to_1(
-; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[L0:%.*]] = load <2 x float>, ptr [[P]], align 8
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
-; CHECK-NEXT:    [[L1:%.*]] = load <2 x float>, ptr [[P1]], align 4
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
-; CHECK-NEXT:    [[L2:%.*]] = load <2 x float>, ptr [[P2]], align 4
-; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x float> [[L0]], <2 x float> [[L1]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[V1]], <2 x float> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    ret <4 x float> [[RES]]
+; SSE-LABEL: define <4 x float> @test_float_3_to_1(
+; SSE-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[L0:%.*]] = load <2 x float>, ptr [[P]], align 8
+; SSE-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; SSE-NEXT:    [[L2:%.*]] = load <2 x float>, ptr [[P2]], align 4
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[L0]], <2 x float> [[L2]], <4 x i32> <i32 0, i32 2, i32 2, i32 3>
+; SSE-NEXT:    ret <4 x float> [[RES]]
+;
+; AVX-LABEL: define <4 x float> @test_float_3_to_1(
+; AVX-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[L0:%.*]] = load <2 x float>, ptr [[P]], align 8
+; AVX-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; AVX-NEXT:    [[L1:%.*]] = load <2 x float>, ptr [[P1]], align 4
+; AVX-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; AVX-NEXT:    [[L2:%.*]] = load <2 x float>, ptr [[P2]], align 4
+; AVX-NEXT:    [[V1:%.*]] = shufflevector <2 x float> [[L0]], <2 x float> [[L1]], <2 x i32> <i32 0, i32 3>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <2 x float> [[V1]], <2 x float> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; AVX-NEXT:    ret <4 x float> [[RES]]
 ;
   %L0 = load <2 x float>, ptr %p, align 8
   %p1 = getelementptr i8, ptr %p, i64 4
@@ -187,9 +208,9 @@ define <4 x i64> @test_i64_4_to_2(ptr %x, ptr %y) {
 ; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i64>, ptr [[Y]], align 16
 ; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 16
 ; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i64>, ptr [[YA]], align 16
-; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i64> [[X0]], <2 x i64> [[X1]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i64> [[Y0]], <2 x i64> [[Y1]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i64> [[VX]], <4 x i64> [[VY]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> [[X0]], <2 x i64> [[X1]], <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[Y0]], <2 x i64> [[Y1]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 2>
+; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %x0 = load <2 x i64>, ptr %x, align 16
@@ -205,16 +226,25 @@ define <4 x i64> @test_i64_4_to_2(ptr %x, ptr %y) {
 }
 
 define <4 x i64> @test_i64_3_to_1_success(ptr %p) {
-; CHECK-LABEL: define <4 x i64> @test_i64_3_to_1_success(
-; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[L0:%.*]] = load <2 x i64>, ptr [[P]], align 16
-; CHECK-NEXT:    [[PA:%.*]] = getelementptr i8, ptr [[P]], i64 8
-; CHECK-NEXT:    [[L1:%.*]] = load <2 x i64>, ptr [[PA]], align 8
-; CHECK-NEXT:    [[PB:%.*]] = getelementptr i8, ptr [[P]], i64 16
-; CHECK-NEXT:    [[L2:%.*]] = load <2 x i64>, ptr [[PB]], align 16
-; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i64> [[L0]], <2 x i64> [[L1]], <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x i64> [[V1]], <2 x i64> [[L2]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT:    ret <4 x i64> [[RES]]
+; SSE-LABEL: define <4 x i64> @test_i64_3_to_1_success(
+; SSE-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[PA:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; SSE-NEXT:    [[L1:%.*]] = load <2 x i64>, ptr [[PA]], align 8
+; SSE-NEXT:    [[PB:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; SSE-NEXT:    [[L2:%.*]] = load <2 x i64>, ptr [[PB]], align 16
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <2 x i64> [[L1]], <2 x i64> [[L2]], <4 x i32> <i32 0, i32 0, i32 3, i32 1>
+; SSE-NEXT:    ret <4 x i64> [[RES]]
+;
+; AVX-LABEL: define <4 x i64> @test_i64_3_to_1_success(
+; AVX-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[L0:%.*]] = load <2 x i64>, ptr [[P]], align 16
+; AVX-NEXT:    [[PA:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; AVX-NEXT:    [[L1:%.*]] = load <2 x i64>, ptr [[PA]], align 8
+; AVX-NEXT:    [[PB:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; AVX-NEXT:    [[L2:%.*]] = load <2 x i64>, ptr [[PB]], align 16
+; AVX-NEXT:    [[V1:%.*]] = shufflevector <2 x i64> [[L0]], <2 x i64> [[L1]], <2 x i32> <i32 1, i32 2>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <2 x i64> [[V1]], <2 x i64> [[L2]], <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; AVX-NEXT:    ret <4 x i64> [[RES]]
 ;
   %L0 = load <2 x i64>, ptr %p, align 16
   %pa = getelementptr i8, ptr %p, i64 8
@@ -228,18 +258,31 @@ define <4 x i64> @test_i64_3_to_1_success(ptr %p) {
 
 ; i32
 define <4 x i32> @test_i32_4_to_2_success(ptr %x, ptr %y) {
-; CHECK-LABEL: define <4 x i32> @test_i32_4_to_2_success(
-; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
-; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
-; CHECK-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
-; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
-; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
-; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
-; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; CHECK-NEXT:    ret <4 x i32> [[RES]]
+; SSE-LABEL: define <4 x i32> @test_i32_4_to_2_success(
+; SSE-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; SSE-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; SSE-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; SSE-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; SSE-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; SSE-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; SSE-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; SSE-NEXT:    ret <4 x i32> [[RES]]
+;
+; AVX-LABEL: define <4 x i32> @test_i32_4_to_2_success(
+; AVX-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; AVX-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; AVX-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; AVX-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; AVX-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; AVX-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; AVX-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
+; AVX-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 poison, i32 poison, i32 1, i32 3>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; AVX-NEXT:    ret <4 x i32> [[RES]]
 ;
   %x0 = load <2 x i32>, ptr %x, align 8
   %xa = getelementptr i8, ptr %x, i64 8
@@ -254,16 +297,24 @@ define <4 x i32> @test_i32_4_to_2_success(ptr %x, ptr %y) {
 }
 
 define <4 x i32> @test_i32_3_to_1(ptr %p) {
-; CHECK-LABEL: define <4 x i32> @test_i32_3_to_1(
-; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[L0:%.*]] = load <2 x i32>, ptr [[P]], align 8
-; CHECK-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
-; CHECK-NEXT:    [[L1:%.*]] = load <2 x i32>, ptr [[P1]], align 4
-; CHECK-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
-; CHECK-NEXT:    [[L2:%.*]] = load <2 x i32>, ptr [[P2]], align 8
-; CHECK-NEXT:    [[V1:%.*]] = shufflevector <2 x i32> [[L0]], <2 x i32> [[L1]], <2 x i32> <i32 0, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <2 x i32> [[V1]], <2 x i32> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    ret <4 x i32> [[RES]]
+; SSE-LABEL: define <4 x i32> @test_i32_3_to_1(
+; SSE-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[L0:%.*]] = load <2 x i32>, ptr [[P]], align 8
+; SSE-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; SSE-NEXT:    [[L2:%.*]] = load <2 x i32>, ptr [[P2]], align 8
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <2 x i32> [[L0]], <2 x i32> [[L2]], <4 x i32> <i32 0, i32 2, i32 2, i32 3>
+; SSE-NEXT:    ret <4 x i32> [[RES]]
+;
+; AVX-LABEL: define <4 x i32> @test_i32_3_to_1(
+; AVX-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[L0:%.*]] = load <2 x i32>, ptr [[P]], align 8
+; AVX-NEXT:    [[P1:%.*]] = getelementptr i8, ptr [[P]], i64 4
+; AVX-NEXT:    [[L1:%.*]] = load <2 x i32>, ptr [[P1]], align 4
+; AVX-NEXT:    [[P2:%.*]] = getelementptr i8, ptr [[P]], i64 8
+; AVX-NEXT:    [[L2:%.*]] = load <2 x i32>, ptr [[P2]], align 8
+; AVX-NEXT:    [[V1:%.*]] = shufflevector <2 x i32> [[L0]], <2 x i32> [[L1]], <2 x i32> <i32 0, i32 3>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <2 x i32> [[V1]], <2 x i32> [[L2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; AVX-NEXT:    ret <4 x i32> [[RES]]
 ;
   %L0 = load <2 x i32>, ptr %p, align 8
   %p1 = getelementptr i8, ptr %p, i64 4
@@ -277,18 +328,31 @@ define <4 x i32> @test_i32_3_to_1(ptr %p) {
 
 ; i16
 define <8 x i16> @test_i16_4_to_2(ptr %x, ptr %y) {
-; CHECK-LABEL: define <8 x i16> @test_i16_4_to_2(
-; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[X0:%.*]] = load <2 x i16>, ptr [[X]], align 4
-; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 4
-; CHECK-NEXT:    [[X1:%.*]] = load <2 x i16>, ptr [[XA]], align 4
-; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i16>, ptr [[Y]], align 4
-; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 4
-; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i16>, ptr [[YA]], align 4
-; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i16> [[X0]], <2 x i16> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i16> [[Y0]], <2 x i16> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i16> [[VX]], <4 x i16> [[VY]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-; CHECK-NEXT:    ret <8 x i16> [[RES]]
+; SSE-LABEL: define <8 x i16> @test_i16_4_to_2(
+; SSE-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[X0:%.*]] = load <2 x i16>, ptr [[X]], align 4
+; SSE-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 4
+; SSE-NEXT:    [[X1:%.*]] = load <2 x i16>, ptr [[XA]], align 4
+; SSE-NEXT:    [[Y0:%.*]] = load <2 x i16>, ptr [[Y]], align 4
+; SSE-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 4
+; SSE-NEXT:    [[Y1:%.*]] = load <2 x i16>, ptr [[YA]], align 4
+; SSE-NEXT:    [[VX:%.*]] = shufflevector <2 x i16> [[X0]], <2 x i16> [[X1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[VY:%.*]] = shufflevector <2 x i16> [[Y0]], <2 x i16> [[Y1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <4 x i16> [[VX]], <4 x i16> [[VY]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; SSE-NEXT:    ret <8 x i16> [[RES]]
+;
+; AVX-LABEL: define <8 x i16> @test_i16_4_to_2(
+; AVX-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[X0:%.*]] = load <2 x i16>, ptr [[X]], align 4
+; AVX-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 4
+; AVX-NEXT:    [[X1:%.*]] = load <2 x i16>, ptr [[XA]], align 4
+; AVX-NEXT:    [[Y0:%.*]] = load <2 x i16>, ptr [[Y]], align 4
+; AVX-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 4
+; AVX-NEXT:    [[Y1:%.*]] = load <2 x i16>, ptr [[YA]], align 4
+; AVX-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i16> [[X0]], <2 x i16> [[X1]], <8 x i32> <i32 0, i32 poison, i32 1, i32 poison, i32 2, i32 poison, i32 3, i32 poison>
+; AVX-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i16> [[Y0]], <2 x i16> [[Y1]], <8 x i32> <i32 poison, i32 0, i32 poison, i32 1, i32 poison, i32 2, i32 poison, i32 3>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP2]], <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+; AVX-NEXT:    ret <8 x i16> [[RES]]
 ;
   %x0 = load <2 x i16>, ptr %x, align 4
   %xa = getelementptr i8, ptr %x, i64 4
@@ -369,19 +433,33 @@ define <4 x i32> @test_neg_trunc_mismatch(ptr %p) {
 
 ; Multi-use (hasOneUse() == false)
 define <4 x i32> @test_neg_multi_use(ptr %x, ptr %y) {
-; CHECK-LABEL: define <4 x i32> @test_neg_multi_use(
-; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
-; CHECK-NEXT:    call void @use(<2 x i32> [[X0]])
-; CHECK-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
-; CHECK-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
-; CHECK-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
-; CHECK-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
-; CHECK-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
-; CHECK-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-; CHECK-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT:    ret <4 x i32> [[RES]]
+; SSE-LABEL: define <4 x i32> @test_neg_multi_use(
+; SSE-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; SSE-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; SSE-NEXT:    call void @use(<2 x i32> [[X0]])
+; SSE-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; SSE-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; SSE-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; SSE-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; SSE-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; SSE-NEXT:    [[VX:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; SSE-NEXT:    [[VY:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; SSE-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[VX]], <4 x i32> [[VY]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; SSE-NEXT:    ret <4 x i32> [[RES]]
+;
+; AVX-LABEL: define <4 x i32> @test_neg_multi_use(
+; AVX-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) #[[ATTR0]] {
+; AVX-NEXT:    [[X0:%.*]] = load <2 x i32>, ptr [[X]], align 8
+; AVX-NEXT:    call void @use(<2 x i32> [[X0]])
+; AVX-NEXT:    [[XA:%.*]] = getelementptr i8, ptr [[X]], i64 8
+; AVX-NEXT:    [[X1:%.*]] = load <2 x i32>, ptr [[XA]], align 8
+; AVX-NEXT:    [[Y0:%.*]] = load <2 x i32>, ptr [[Y]], align 8
+; AVX-NEXT:    [[YA:%.*]] = getelementptr i8, ptr [[Y]], i64 8
+; AVX-NEXT:    [[Y1:%.*]] = load <2 x i32>, ptr [[YA]], align 8
+; AVX-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i32> [[X0]], <2 x i32> [[X1]], <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
+; AVX-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i32> [[Y0]], <2 x i32> [[Y1]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 2>
+; AVX-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; AVX-NEXT:    ret <4 x i32> [[RES]]
 ;
   %x0 = load <2 x i32>, ptr %x, align 8
   call void @use(<2 x i32> %x0)
@@ -489,6 +567,3 @@ define <32 x i8> @test_widen_8xi8_with_poison(ptr %p) {
   ret <32 x i8> %res
 }
 
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX: {{.*}}
-; SSE: {{.*}}



More information about the llvm-commits mailing list