[llvm] [AMDGPU][AMDGPULateCodeGenPrepare] Combine scalarized selects back into vector selects (PR #173990)

Pankaj Dwivedi via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 30 06:43:09 PST 2025


https://github.com/PankajDwivedi-25 updated https://github.com/llvm/llvm-project/pull/173990
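
In short, the patch rewrites per-byte scalarized selects that share one
condition and a zero false value into a single vector select on the
original dword vector. A minimal sketch of the rewrite, taken from the
doc comment in the patch:

  %vec = bitcast <4 x i32> %src to <16 x i8>
  %e0  = extractelement <16 x i8> %vec, i64 0
  %s0  = select i1 %cond, i8 %e0, i8 0
  ; ... repeated for all 16 bytes

becomes

  %sel = select i1 %cond, <4 x i32> %src, <4 x i32> zeroinitializer
  %vec = bitcast <4 x i32> %sel to <16 x i8>
  %e0  = extractelement <16 x i8> %vec, i64 0
  ; ...

so 16 byte-wise v_cndmask_b32 instructions become 4 dword-wide ones.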

>From a5260ed026e6431a21f41897b8da3a4b59a876a2 Mon Sep 17 00:00:00 2001
From: padivedi <padivedi at amd.com>
Date: Tue, 30 Dec 2025 18:53:14 +0530
Subject: [PATCH 1/2] Combine scalarized selects back into vector selects

---
 .../AMDGPU/AMDGPULateCodeGenPrepare.cpp       | 149 ++++
 .../CodeGen/AMDGPU/combine-scalar-selects.ll  | 638 ++++++++++++++++++
 2 files changed, 787 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/combine-scalar-selects.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
index 63e265612cbf7..9e5b076409862 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
@@ -14,6 +14,7 @@
 
 #include "AMDGPU.h"
 #include "AMDGPUTargetMachine.h"
+#include "llvm/ADT/DenseMap.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -21,14 +22,17 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Transforms/Utils/Local.h"
 
 #define DEBUG_TYPE "amdgpu-late-codegenprepare"
 
 using namespace llvm;
+using namespace llvm::PatternMatch;
 
 // Scalar load widening needs running after load-store-vectorizer as that pass
 // doesn't handle overlapping cases. In addition, this pass enhances the
@@ -40,6 +44,12 @@ static cl::opt<bool>
                         "AMDGPULateCodeGenPrepare"),
                cl::ReallyHidden, cl::init(true));
 
+static cl::opt<bool> CombineScalarSelects(
+    "amdgpu-late-codegenprepare-combine-scalar-selects",
+    cl::desc("Combine scalarized selects back into vector selects in "
+             "AMDGPULateCodeGenPrepare"),
+    cl::ReallyHidden, cl::init(true));
+
 namespace {
 
 class AMDGPULateCodeGenPrepare
@@ -68,6 +78,24 @@ class AMDGPULateCodeGenPrepare
 
   bool canWidenScalarExtLoad(LoadInst &LI) const;
   bool visitLoadInst(LoadInst &LI);
+
+  /// Combine scalarized selects from a bitcast back into a vector select.
+  ///
+  /// This optimization addresses VGPR bloat from patterns like:
+  ///   %vec = bitcast <4 x i32> %src to <16 x i8>
+  ///   %e0 = extractelement <16 x i8> %vec, i64 0
+  ///   %s0 = select i1 %cond, i8 %e0, i8 0
+  ///   ... (repeated for all 16 elements)
+  ///
+  /// This pattern generates 16 separate v_cndmask_b32 instructions. Instead,
+  /// we transform it to:
+  ///   %sel = select i1 %cond, <4 x i32> %src, <4 x i32> zeroinitializer
+  ///   %vec = bitcast <4 x i32> %sel to <16 x i8>
+  ///   %e0 = extractelement <16 x i8> %vec, i64 0
+  ///   ...
+  ///
+  /// This produces only 4 v_cndmask_b32 instructions operating on dwords.
+  bool tryCombineSelectsFromBitcast(BitCastInst &BC);
 };
 
 using ValueToValueMap = DenseMap<const Value *, Value *>;
@@ -225,6 +253,20 @@ bool AMDGPULateCodeGenPrepare::run() {
       Changed |= LRO.optimizeLiveType(&I, DeadInsts);
     }
 
+  // Combine scalarized selects back into vector selects.
+  // This uses a top-down approach: iterate over bitcasts (i32 vec -> i8 vec)
+  // and collect all select instructions that use extracted elements with a
+  // zero false value. By starting from the bitcast, we process each source
+  // exactly once, avoiding redundant work when multiple selects share a source.
+  if (CombineScalarSelects) {
+    for (auto &BB : F) {
+      for (Instruction &I : make_early_inc_range(BB)) {
+        if (auto *BC = dyn_cast<BitCastInst>(&I))
+          Changed |= tryCombineSelectsFromBitcast(*BC);
+      }
+    }
+  }
+
   RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts);
   return Changed;
 }
@@ -551,6 +593,113 @@ bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
   return true;
 }
 
+bool AMDGPULateCodeGenPrepare::tryCombineSelectsFromBitcast(BitCastInst &BC) {
+  auto *SrcVecTy = dyn_cast<FixedVectorType>(BC.getSrcTy());
+  auto *DstVecTy = dyn_cast<FixedVectorType>(BC.getDestTy());
+  if (!SrcVecTy || !DstVecTy)
+    return false;
+
+  // Must be: bitcast <N x i32> to <M x i8>
+  if (!SrcVecTy->getElementType()->isIntegerTy(32) ||
+      !DstVecTy->getElementType()->isIntegerTy(8))
+    return false;
+
+  unsigned NumDstElts = DstVecTy->getNumElements();
+  BasicBlock *BB = BC.getParent();
+
+  // Require at least half the elements to have matching selects.
+  // For v16i8 (from v4i32), this means at least 8 selects must match.
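+  // Similarly, v8i8 (from v2i32) requires at least 4 matching selects.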
+  // The threshold is a heuristic intended to keep the transformation
+  // profitable.
+  unsigned MinRequired = NumDstElts / 2;
+
+  // Early exit: not enough users to possibly meet the threshold.
+  if (BC.getNumUses() < MinRequired)
+    return false;
+
+  // Group selects by their condition value. Different conditions selecting
+  // from the same bitcast are handled as independent groups, allowing us to
+  // optimize multiple select patterns from a single bitcast.
+  struct SelectGroup {
+    // Map from element index to (select, extractelement) pair.
+    SmallDenseMap<unsigned, std::pair<SelectInst *, ExtractElementInst *>, 16>
+        Selects;
+    // Track the earliest select instruction for correct insertion point.
+    SelectInst *FirstSelect = nullptr;
+  };
+  DenseMap<Value *, SelectGroup> ConditionGroups;
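+  // For example (hypothetical), selects on %cond.a and selects on %cond.b
+  // fed by the same bitcast form two groups, and each group that meets the
+  // threshold is rewritten into its own vector select.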
+
+  // Collect all matching select patterns in a single pass.
+  // Pattern: select i1 %cond, i8 (extractelement %bc, idx), i8 0
+  for (User *U : BC.users()) {
+    auto *Ext = dyn_cast<ExtractElementInst>(U);
+    if (!Ext || Ext->getParent() != BB)
+      continue;
+
+    auto *IdxC = dyn_cast<ConstantInt>(Ext->getIndexOperand());
+    if (!IdxC || IdxC->getZExtValue() >= NumDstElts)
+      continue;
+
+    unsigned Idx = IdxC->getZExtValue();
+
+    for (User *EU : Ext->users()) {
+      auto *Sel = dyn_cast<SelectInst>(EU);
+      // Must be: select %cond, %extract, 0 (in same BB)
+      if (!Sel || Sel->getParent() != BB || Sel->getTrueValue() != Ext ||
+          !match(Sel->getFalseValue(), m_Zero()))
+        continue;
+
+      auto &Group = ConditionGroups[Sel->getCondition()];
+      Group.Selects[Idx] = {Sel, Ext};
+
+      // Track earliest select to ensure correct dominance for insertion.
+      if (!Group.FirstSelect || Sel->comesBefore(Group.FirstSelect))
+        Group.FirstSelect = Sel;
+    }
+  }
+
+  bool Changed = false;
+
+  // Process each condition group that meets the threshold.
+  for (auto &[Cond, Group] : ConditionGroups) {
+    if (Group.Selects.size() < MinRequired)
+      continue;
+
+    LLVM_DEBUG(dbgs() << "AMDGPULateCodeGenPrepare: Combining "
+                      << Group.Selects.size()
+                      << " scalar selects into vector select\n");
+
+    // Insert before the first select to maintain dominance.
+    IRBuilder<> Builder(Group.FirstSelect);
+
+    // Create vector select: select i1 %cond, <N x i32> %src, zeroinitializer
+    Value *VecSel =
+        Builder.CreateSelect(Cond, BC.getOperand(0),
+                             Constant::getNullValue(SrcVecTy), "combined.sel");
+
+    // Bitcast the selected vector back to the byte vector type.
+    Value *NewBC = Builder.CreateBitCast(VecSel, DstVecTy, "combined.bc");
+
+    // Replace each scalar select with an extract from the combined result.
+    for (auto &[Idx, Pair] : Group.Selects) {
+      Value *NewExt = Builder.CreateExtractElement(NewBC, Idx);
+      Pair.first->replaceAllUsesWith(NewExt);
+      DeadInsts.emplace_back(Pair.first);
+
+      // Mark the original extract as dead if it has no remaining uses.
+      if (Pair.second->use_empty())
+        DeadInsts.emplace_back(Pair.second);
+    }
+
+    Changed = true;
+  }
+
+  // Mark the original bitcast as dead if all its users were replaced.
+  if (Changed && BC.use_empty())
+    DeadInsts.emplace_back(&BC);
+
+  return Changed;
+}
+
 PreservedAnalyses
 AMDGPULateCodeGenPreparePass::run(Function &F, FunctionAnalysisManager &FAM) {
   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
diff --git a/llvm/test/CodeGen/AMDGPU/combine-scalar-selects.ll b/llvm/test/CodeGen/AMDGPU/combine-scalar-selects.ll
new file mode 100644
index 0000000000000..973da749d9daf
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine-scalar-selects.ll
@@ -0,0 +1,638 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-late-codegenprepare -S %s | FileCheck %s --check-prefix=CHECK-OPT
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-late-codegenprepare -amdgpu-late-codegenprepare-combine-scalar-selects=false -S %s | FileCheck %s --check-prefix=CHECK-NOOPT
+
+; Test that multiple scalar selects from the same vector source are combined
+; back into a vector select when the optimization is enabled, and remain as
+; individual scalar selects when disabled.
+
+; This pattern occurs when buffer_load_dwordx4 results are bitcast to v16i8,
+; then each byte is extracted and conditionally selected with zero.
+
+define amdgpu_kernel void @combine_scalar_selects_v16i8(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @combine_scalar_selects_v16i8(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[BUFFER_RESOURCE:%.*]], i32 [[OFFSET:%.*]], i1 [[VALID:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[LOADED:%.*]] = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> [[BUFFER_RESOURCE]], i32 [[OFFSET]], i32 0, i32 0)
+; CHECK-OPT-NEXT:    [[COMBINED_SEL:%.*]] = select i1 [[VALID]], <4 x i32> [[LOADED]], <4 x i32> zeroinitializer
+; CHECK-OPT-NEXT:    [[COMBINED_BC:%.*]] = bitcast <4 x i32> [[COMBINED_SEL]] to <16 x i8>
+; CHECK-OPT-NEXT:    [[TMP0:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 0
+; CHECK-OPT-NEXT:    [[TMP1:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 7
+; CHECK-OPT-NEXT:    [[TMP2:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 14
+; CHECK-OPT-NEXT:    [[TMP3:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 2
+; CHECK-OPT-NEXT:    [[TMP4:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 9
+; CHECK-OPT-NEXT:    [[TMP5:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 4
+; CHECK-OPT-NEXT:    [[TMP6:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 11
+; CHECK-OPT-NEXT:    [[TMP7:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 6
+; CHECK-OPT-NEXT:    [[TMP8:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 13
+; CHECK-OPT-NEXT:    [[TMP9:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 1
+; CHECK-OPT-NEXT:    [[TMP10:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 8
+; CHECK-OPT-NEXT:    [[TMP11:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 15
+; CHECK-OPT-NEXT:    [[TMP12:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 3
+; CHECK-OPT-NEXT:    [[TMP13:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 10
+; CHECK-OPT-NEXT:    [[TMP14:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 5
+; CHECK-OPT-NEXT:    [[TMP15:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 12
+; CHECK-OPT-NEXT:    store i8 [[TMP0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-OPT-NEXT:    store i8 [[TMP9]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-OPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-OPT-NEXT:    store i8 [[TMP3]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-OPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[TMP12]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-OPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-OPT-NEXT:    store i8 [[TMP5]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-OPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-OPT-NEXT:    store i8 [[TMP14]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-OPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-OPT-NEXT:    store i8 [[TMP7]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-OPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-OPT-NEXT:    store i8 [[TMP1]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-OPT-NEXT:    [[PTR8:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 8
+; CHECK-OPT-NEXT:    store i8 [[TMP10]], ptr addrspace(1) [[PTR8]], align 1
+; CHECK-OPT-NEXT:    [[PTR9:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 9
+; CHECK-OPT-NEXT:    store i8 [[TMP4]], ptr addrspace(1) [[PTR9]], align 1
+; CHECK-OPT-NEXT:    [[PTR10:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 10
+; CHECK-OPT-NEXT:    store i8 [[TMP13]], ptr addrspace(1) [[PTR10]], align 1
+; CHECK-OPT-NEXT:    [[PTR11:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 11
+; CHECK-OPT-NEXT:    store i8 [[TMP6]], ptr addrspace(1) [[PTR11]], align 1
+; CHECK-OPT-NEXT:    [[PTR12:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 12
+; CHECK-OPT-NEXT:    store i8 [[TMP15]], ptr addrspace(1) [[PTR12]], align 1
+; CHECK-OPT-NEXT:    [[PTR13:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 13
+; CHECK-OPT-NEXT:    store i8 [[TMP8]], ptr addrspace(1) [[PTR13]], align 1
+; CHECK-OPT-NEXT:    [[PTR14:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 14
+; CHECK-OPT-NEXT:    store i8 [[TMP2]], ptr addrspace(1) [[PTR14]], align 1
+; CHECK-OPT-NEXT:    [[PTR15:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 15
+; CHECK-OPT-NEXT:    store i8 [[TMP11]], ptr addrspace(1) [[PTR15]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @combine_scalar_selects_v16i8(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[BUFFER_RESOURCE:%.*]], i32 [[OFFSET:%.*]], i1 [[VALID:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[LOADED:%.*]] = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> [[BUFFER_RESOURCE]], i32 [[OFFSET]], i32 0, i32 0)
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[LOADED]] to <16 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[E1:%.*]] = extractelement <16 x i8> [[BYTES]], i64 1
+; CHECK-NOOPT-NEXT:    [[E2:%.*]] = extractelement <16 x i8> [[BYTES]], i64 2
+; CHECK-NOOPT-NEXT:    [[E3:%.*]] = extractelement <16 x i8> [[BYTES]], i64 3
+; CHECK-NOOPT-NEXT:    [[E4:%.*]] = extractelement <16 x i8> [[BYTES]], i64 4
+; CHECK-NOOPT-NEXT:    [[E5:%.*]] = extractelement <16 x i8> [[BYTES]], i64 5
+; CHECK-NOOPT-NEXT:    [[E6:%.*]] = extractelement <16 x i8> [[BYTES]], i64 6
+; CHECK-NOOPT-NEXT:    [[E7:%.*]] = extractelement <16 x i8> [[BYTES]], i64 7
+; CHECK-NOOPT-NEXT:    [[E8:%.*]] = extractelement <16 x i8> [[BYTES]], i64 8
+; CHECK-NOOPT-NEXT:    [[E9:%.*]] = extractelement <16 x i8> [[BYTES]], i64 9
+; CHECK-NOOPT-NEXT:    [[E10:%.*]] = extractelement <16 x i8> [[BYTES]], i64 10
+; CHECK-NOOPT-NEXT:    [[E11:%.*]] = extractelement <16 x i8> [[BYTES]], i64 11
+; CHECK-NOOPT-NEXT:    [[E12:%.*]] = extractelement <16 x i8> [[BYTES]], i64 12
+; CHECK-NOOPT-NEXT:    [[E13:%.*]] = extractelement <16 x i8> [[BYTES]], i64 13
+; CHECK-NOOPT-NEXT:    [[E14:%.*]] = extractelement <16 x i8> [[BYTES]], i64 14
+; CHECK-NOOPT-NEXT:    [[E15:%.*]] = extractelement <16 x i8> [[BYTES]], i64 15
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[VALID]], i8 [[E0]], i8 0
+; CHECK-NOOPT-NEXT:    [[S1:%.*]] = select i1 [[VALID]], i8 [[E1]], i8 0
+; CHECK-NOOPT-NEXT:    [[S2:%.*]] = select i1 [[VALID]], i8 [[E2]], i8 0
+; CHECK-NOOPT-NEXT:    [[S3:%.*]] = select i1 [[VALID]], i8 [[E3]], i8 0
+; CHECK-NOOPT-NEXT:    [[S4:%.*]] = select i1 [[VALID]], i8 [[E4]], i8 0
+; CHECK-NOOPT-NEXT:    [[S5:%.*]] = select i1 [[VALID]], i8 [[E5]], i8 0
+; CHECK-NOOPT-NEXT:    [[S6:%.*]] = select i1 [[VALID]], i8 [[E6]], i8 0
+; CHECK-NOOPT-NEXT:    [[S7:%.*]] = select i1 [[VALID]], i8 [[E7]], i8 0
+; CHECK-NOOPT-NEXT:    [[S8:%.*]] = select i1 [[VALID]], i8 [[E8]], i8 0
+; CHECK-NOOPT-NEXT:    [[S9:%.*]] = select i1 [[VALID]], i8 [[E9]], i8 0
+; CHECK-NOOPT-NEXT:    [[S10:%.*]] = select i1 [[VALID]], i8 [[E10]], i8 0
+; CHECK-NOOPT-NEXT:    [[S11:%.*]] = select i1 [[VALID]], i8 [[E11]], i8 0
+; CHECK-NOOPT-NEXT:    [[S12:%.*]] = select i1 [[VALID]], i8 [[E12]], i8 0
+; CHECK-NOOPT-NEXT:    [[S13:%.*]] = select i1 [[VALID]], i8 [[E13]], i8 0
+; CHECK-NOOPT-NEXT:    [[S14:%.*]] = select i1 [[VALID]], i8 [[E14]], i8 0
+; CHECK-NOOPT-NEXT:    [[S15:%.*]] = select i1 [[VALID]], i8 [[E15]], i8 0
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-NOOPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-NOOPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-NOOPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-NOOPT-NEXT:    store i8 [[S4]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-NOOPT-NEXT:    store i8 [[S5]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-NOOPT-NEXT:    store i8 [[S6]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-NOOPT-NEXT:    store i8 [[S7]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR8:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 8
+; CHECK-NOOPT-NEXT:    store i8 [[S8]], ptr addrspace(1) [[PTR8]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR9:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 9
+; CHECK-NOOPT-NEXT:    store i8 [[S9]], ptr addrspace(1) [[PTR9]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR10:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 10
+; CHECK-NOOPT-NEXT:    store i8 [[S10]], ptr addrspace(1) [[PTR10]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR11:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 11
+; CHECK-NOOPT-NEXT:    store i8 [[S11]], ptr addrspace(1) [[PTR11]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR12:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 12
+; CHECK-NOOPT-NEXT:    store i8 [[S12]], ptr addrspace(1) [[PTR12]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR13:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 13
+; CHECK-NOOPT-NEXT:    store i8 [[S13]], ptr addrspace(1) [[PTR13]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR14:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 14
+; CHECK-NOOPT-NEXT:    store i8 [[S14]], ptr addrspace(1) [[PTR14]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR15:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 15
+; CHECK-NOOPT-NEXT:    store i8 [[S15]], ptr addrspace(1) [[PTR15]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <4 x i32> %buffer_resource,
+  i32 %offset,
+  i1 %valid
+) {
+entry:
+  %loaded = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %buffer_resource, i32 %offset, i32 0, i32 0)
+  %bytes = bitcast <4 x i32> %loaded to <16 x i8>
+
+  %e0 = extractelement <16 x i8> %bytes, i64 0
+  %e1 = extractelement <16 x i8> %bytes, i64 1
+  %e2 = extractelement <16 x i8> %bytes, i64 2
+  %e3 = extractelement <16 x i8> %bytes, i64 3
+  %e4 = extractelement <16 x i8> %bytes, i64 4
+  %e5 = extractelement <16 x i8> %bytes, i64 5
+  %e6 = extractelement <16 x i8> %bytes, i64 6
+  %e7 = extractelement <16 x i8> %bytes, i64 7
+  %e8 = extractelement <16 x i8> %bytes, i64 8
+  %e9 = extractelement <16 x i8> %bytes, i64 9
+  %e10 = extractelement <16 x i8> %bytes, i64 10
+  %e11 = extractelement <16 x i8> %bytes, i64 11
+  %e12 = extractelement <16 x i8> %bytes, i64 12
+  %e13 = extractelement <16 x i8> %bytes, i64 13
+  %e14 = extractelement <16 x i8> %bytes, i64 14
+  %e15 = extractelement <16 x i8> %bytes, i64 15
+
+  %s0 = select i1 %valid, i8 %e0, i8 0
+  %s1 = select i1 %valid, i8 %e1, i8 0
+  %s2 = select i1 %valid, i8 %e2, i8 0
+  %s3 = select i1 %valid, i8 %e3, i8 0
+  %s4 = select i1 %valid, i8 %e4, i8 0
+  %s5 = select i1 %valid, i8 %e5, i8 0
+  %s6 = select i1 %valid, i8 %e6, i8 0
+  %s7 = select i1 %valid, i8 %e7, i8 0
+  %s8 = select i1 %valid, i8 %e8, i8 0
+  %s9 = select i1 %valid, i8 %e9, i8 0
+  %s10 = select i1 %valid, i8 %e10, i8 0
+  %s11 = select i1 %valid, i8 %e11, i8 0
+  %s12 = select i1 %valid, i8 %e12, i8 0
+  %s13 = select i1 %valid, i8 %e13, i8 0
+  %s14 = select i1 %valid, i8 %e14, i8 0
+  %s15 = select i1 %valid, i8 %e15, i8 0
+
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  %ptr4 = getelementptr i8, ptr addrspace(1) %out, i64 4
+  store i8 %s4, ptr addrspace(1) %ptr4, align 1
+  %ptr5 = getelementptr i8, ptr addrspace(1) %out, i64 5
+  store i8 %s5, ptr addrspace(1) %ptr5, align 1
+  %ptr6 = getelementptr i8, ptr addrspace(1) %out, i64 6
+  store i8 %s6, ptr addrspace(1) %ptr6, align 1
+  %ptr7 = getelementptr i8, ptr addrspace(1) %out, i64 7
+  store i8 %s7, ptr addrspace(1) %ptr7, align 1
+  %ptr8 = getelementptr i8, ptr addrspace(1) %out, i64 8
+  store i8 %s8, ptr addrspace(1) %ptr8, align 1
+  %ptr9 = getelementptr i8, ptr addrspace(1) %out, i64 9
+  store i8 %s9, ptr addrspace(1) %ptr9, align 1
+  %ptr10 = getelementptr i8, ptr addrspace(1) %out, i64 10
+  store i8 %s10, ptr addrspace(1) %ptr10, align 1
+  %ptr11 = getelementptr i8, ptr addrspace(1) %out, i64 11
+  store i8 %s11, ptr addrspace(1) %ptr11, align 1
+  %ptr12 = getelementptr i8, ptr addrspace(1) %out, i64 12
+  store i8 %s12, ptr addrspace(1) %ptr12, align 1
+  %ptr13 = getelementptr i8, ptr addrspace(1) %out, i64 13
+  store i8 %s13, ptr addrspace(1) %ptr13, align 1
+  %ptr14 = getelementptr i8, ptr addrspace(1) %out, i64 14
+  store i8 %s14, ptr addrspace(1) %ptr14, align 1
+  %ptr15 = getelementptr i8, ptr addrspace(1) %out, i64 15
+  store i8 %s15, ptr addrspace(1) %ptr15, align 1
+
+  ret void
+}
+
+; Test with v8i8 from v2i32 (smaller vector)
+define amdgpu_kernel void @combine_scalar_selects_v8i8(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @combine_scalar_selects_v8i8(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <2 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[COMBINED_SEL:%.*]] = select i1 [[COND]], <2 x i32> [[SRC]], <2 x i32> zeroinitializer
+; CHECK-OPT-NEXT:    [[COMBINED_BC:%.*]] = bitcast <2 x i32> [[COMBINED_SEL]] to <8 x i8>
+; CHECK-OPT-NEXT:    [[TMP0:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 0
+; CHECK-OPT-NEXT:    [[TMP1:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 7
+; CHECK-OPT-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 4
+; CHECK-OPT-NEXT:    [[TMP3:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 1
+; CHECK-OPT-NEXT:    [[TMP4:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 5
+; CHECK-OPT-NEXT:    [[TMP5:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 2
+; CHECK-OPT-NEXT:    [[TMP6:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 6
+; CHECK-OPT-NEXT:    [[TMP7:%.*]] = extractelement <8 x i8> [[COMBINED_BC]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[TMP0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-OPT-NEXT:    store i8 [[TMP3]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-OPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-OPT-NEXT:    store i8 [[TMP5]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-OPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[TMP7]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-OPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-OPT-NEXT:    store i8 [[TMP2]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-OPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-OPT-NEXT:    store i8 [[TMP4]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-OPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-OPT-NEXT:    store i8 [[TMP6]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-OPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-OPT-NEXT:    store i8 [[TMP1]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @combine_scalar_selects_v8i8(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <2 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <8 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[E1:%.*]] = extractelement <8 x i8> [[BYTES]], i64 1
+; CHECK-NOOPT-NEXT:    [[E2:%.*]] = extractelement <8 x i8> [[BYTES]], i64 2
+; CHECK-NOOPT-NEXT:    [[E3:%.*]] = extractelement <8 x i8> [[BYTES]], i64 3
+; CHECK-NOOPT-NEXT:    [[E4:%.*]] = extractelement <8 x i8> [[BYTES]], i64 4
+; CHECK-NOOPT-NEXT:    [[E5:%.*]] = extractelement <8 x i8> [[BYTES]], i64 5
+; CHECK-NOOPT-NEXT:    [[E6:%.*]] = extractelement <8 x i8> [[BYTES]], i64 6
+; CHECK-NOOPT-NEXT:    [[E7:%.*]] = extractelement <8 x i8> [[BYTES]], i64 7
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 [[E0]], i8 0
+; CHECK-NOOPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 [[E1]], i8 0
+; CHECK-NOOPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 [[E2]], i8 0
+; CHECK-NOOPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 [[E3]], i8 0
+; CHECK-NOOPT-NEXT:    [[S4:%.*]] = select i1 [[COND]], i8 [[E4]], i8 0
+; CHECK-NOOPT-NEXT:    [[S5:%.*]] = select i1 [[COND]], i8 [[E5]], i8 0
+; CHECK-NOOPT-NEXT:    [[S6:%.*]] = select i1 [[COND]], i8 [[E6]], i8 0
+; CHECK-NOOPT-NEXT:    [[S7:%.*]] = select i1 [[COND]], i8 [[E7]], i8 0
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-NOOPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-NOOPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-NOOPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-NOOPT-NEXT:    store i8 [[S4]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-NOOPT-NEXT:    store i8 [[S5]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-NOOPT-NEXT:    store i8 [[S6]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-NOOPT-NEXT:    store i8 [[S7]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <2 x i32> %src,
+  i1 %cond
+) {
+entry:
+  %bytes = bitcast <2 x i32> %src to <8 x i8>
+  %e0 = extractelement <8 x i8> %bytes, i64 0
+  %e1 = extractelement <8 x i8> %bytes, i64 1
+  %e2 = extractelement <8 x i8> %bytes, i64 2
+  %e3 = extractelement <8 x i8> %bytes, i64 3
+  %e4 = extractelement <8 x i8> %bytes, i64 4
+  %e5 = extractelement <8 x i8> %bytes, i64 5
+  %e6 = extractelement <8 x i8> %bytes, i64 6
+  %e7 = extractelement <8 x i8> %bytes, i64 7
+  %s0 = select i1 %cond, i8 %e0, i8 0
+  %s1 = select i1 %cond, i8 %e1, i8 0
+  %s2 = select i1 %cond, i8 %e2, i8 0
+  %s3 = select i1 %cond, i8 %e3, i8 0
+  %s4 = select i1 %cond, i8 %e4, i8 0
+  %s5 = select i1 %cond, i8 %e5, i8 0
+  %s6 = select i1 %cond, i8 %e6, i8 0
+  %s7 = select i1 %cond, i8 %e7, i8 0
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  %ptr4 = getelementptr i8, ptr addrspace(1) %out, i64 4
+  store i8 %s4, ptr addrspace(1) %ptr4, align 1
+  %ptr5 = getelementptr i8, ptr addrspace(1) %out, i64 5
+  store i8 %s5, ptr addrspace(1) %ptr5, align 1
+  %ptr6 = getelementptr i8, ptr addrspace(1) %out, i64 6
+  store i8 %s6, ptr addrspace(1) %ptr6, align 1
+  %ptr7 = getelementptr i8, ptr addrspace(1) %out, i64 7
+  store i8 %s7, ptr addrspace(1) %ptr7, align 1
+  ret void
+}
+
+; Test partial coverage: 10 out of 16 elements (should still combine, >= half)
+define amdgpu_kernel void @combine_partial_selects(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @combine_partial_selects(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[COMBINED_SEL:%.*]] = select i1 [[COND]], <4 x i32> [[SRC]], <4 x i32> zeroinitializer
+; CHECK-OPT-NEXT:    [[COMBINED_BC:%.*]] = bitcast <4 x i32> [[COMBINED_SEL]] to <16 x i8>
+; CHECK-OPT-NEXT:    [[TMP0:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 0
+; CHECK-OPT-NEXT:    [[TMP1:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 7
+; CHECK-OPT-NEXT:    [[TMP2:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 4
+; CHECK-OPT-NEXT:    [[TMP3:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 1
+; CHECK-OPT-NEXT:    [[TMP4:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 8
+; CHECK-OPT-NEXT:    [[TMP5:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 5
+; CHECK-OPT-NEXT:    [[TMP6:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 2
+; CHECK-OPT-NEXT:    [[TMP7:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 9
+; CHECK-OPT-NEXT:    [[TMP8:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 6
+; CHECK-OPT-NEXT:    [[TMP9:%.*]] = extractelement <16 x i8> [[COMBINED_BC]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[TMP0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-OPT-NEXT:    store i8 [[TMP3]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-OPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-OPT-NEXT:    store i8 [[TMP6]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-OPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[TMP9]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-OPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-OPT-NEXT:    store i8 [[TMP2]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-OPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-OPT-NEXT:    store i8 [[TMP5]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-OPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-OPT-NEXT:    store i8 [[TMP8]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-OPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-OPT-NEXT:    store i8 [[TMP1]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-OPT-NEXT:    [[PTR8:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 8
+; CHECK-OPT-NEXT:    store i8 [[TMP4]], ptr addrspace(1) [[PTR8]], align 1
+; CHECK-OPT-NEXT:    [[PTR9:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 9
+; CHECK-OPT-NEXT:    store i8 [[TMP7]], ptr addrspace(1) [[PTR9]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @combine_partial_selects(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[E1:%.*]] = extractelement <16 x i8> [[BYTES]], i64 1
+; CHECK-NOOPT-NEXT:    [[E2:%.*]] = extractelement <16 x i8> [[BYTES]], i64 2
+; CHECK-NOOPT-NEXT:    [[E3:%.*]] = extractelement <16 x i8> [[BYTES]], i64 3
+; CHECK-NOOPT-NEXT:    [[E4:%.*]] = extractelement <16 x i8> [[BYTES]], i64 4
+; CHECK-NOOPT-NEXT:    [[E5:%.*]] = extractelement <16 x i8> [[BYTES]], i64 5
+; CHECK-NOOPT-NEXT:    [[E6:%.*]] = extractelement <16 x i8> [[BYTES]], i64 6
+; CHECK-NOOPT-NEXT:    [[E7:%.*]] = extractelement <16 x i8> [[BYTES]], i64 7
+; CHECK-NOOPT-NEXT:    [[E8:%.*]] = extractelement <16 x i8> [[BYTES]], i64 8
+; CHECK-NOOPT-NEXT:    [[E9:%.*]] = extractelement <16 x i8> [[BYTES]], i64 9
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 [[E0]], i8 0
+; CHECK-NOOPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 [[E1]], i8 0
+; CHECK-NOOPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 [[E2]], i8 0
+; CHECK-NOOPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 [[E3]], i8 0
+; CHECK-NOOPT-NEXT:    [[S4:%.*]] = select i1 [[COND]], i8 [[E4]], i8 0
+; CHECK-NOOPT-NEXT:    [[S5:%.*]] = select i1 [[COND]], i8 [[E5]], i8 0
+; CHECK-NOOPT-NEXT:    [[S6:%.*]] = select i1 [[COND]], i8 [[E6]], i8 0
+; CHECK-NOOPT-NEXT:    [[S7:%.*]] = select i1 [[COND]], i8 [[E7]], i8 0
+; CHECK-NOOPT-NEXT:    [[S8:%.*]] = select i1 [[COND]], i8 [[E8]], i8 0
+; CHECK-NOOPT-NEXT:    [[S9:%.*]] = select i1 [[COND]], i8 [[E9]], i8 0
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-NOOPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-NOOPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-NOOPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR4:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 4
+; CHECK-NOOPT-NEXT:    store i8 [[S4]], ptr addrspace(1) [[PTR4]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR5:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 5
+; CHECK-NOOPT-NEXT:    store i8 [[S5]], ptr addrspace(1) [[PTR5]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR6:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 6
+; CHECK-NOOPT-NEXT:    store i8 [[S6]], ptr addrspace(1) [[PTR6]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR7:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 7
+; CHECK-NOOPT-NEXT:    store i8 [[S7]], ptr addrspace(1) [[PTR7]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR8:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 8
+; CHECK-NOOPT-NEXT:    store i8 [[S8]], ptr addrspace(1) [[PTR8]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR9:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 9
+; CHECK-NOOPT-NEXT:    store i8 [[S9]], ptr addrspace(1) [[PTR9]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <4 x i32> %src,
+  i1 %cond
+) {
+entry:
+  %bytes = bitcast <4 x i32> %src to <16 x i8>
+  ; Only extract and select 10 elements (indices 0-9)
+  %e0 = extractelement <16 x i8> %bytes, i64 0
+  %e1 = extractelement <16 x i8> %bytes, i64 1
+  %e2 = extractelement <16 x i8> %bytes, i64 2
+  %e3 = extractelement <16 x i8> %bytes, i64 3
+  %e4 = extractelement <16 x i8> %bytes, i64 4
+  %e5 = extractelement <16 x i8> %bytes, i64 5
+  %e6 = extractelement <16 x i8> %bytes, i64 6
+  %e7 = extractelement <16 x i8> %bytes, i64 7
+  %e8 = extractelement <16 x i8> %bytes, i64 8
+  %e9 = extractelement <16 x i8> %bytes, i64 9
+  %s0 = select i1 %cond, i8 %e0, i8 0
+  %s1 = select i1 %cond, i8 %e1, i8 0
+  %s2 = select i1 %cond, i8 %e2, i8 0
+  %s3 = select i1 %cond, i8 %e3, i8 0
+  %s4 = select i1 %cond, i8 %e4, i8 0
+  %s5 = select i1 %cond, i8 %e5, i8 0
+  %s6 = select i1 %cond, i8 %e6, i8 0
+  %s7 = select i1 %cond, i8 %e7, i8 0
+  %s8 = select i1 %cond, i8 %e8, i8 0
+  %s9 = select i1 %cond, i8 %e9, i8 0
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  %ptr4 = getelementptr i8, ptr addrspace(1) %out, i64 4
+  store i8 %s4, ptr addrspace(1) %ptr4, align 1
+  %ptr5 = getelementptr i8, ptr addrspace(1) %out, i64 5
+  store i8 %s5, ptr addrspace(1) %ptr5, align 1
+  %ptr6 = getelementptr i8, ptr addrspace(1) %out, i64 6
+  store i8 %s6, ptr addrspace(1) %ptr6, align 1
+  %ptr7 = getelementptr i8, ptr addrspace(1) %out, i64 7
+  store i8 %s7, ptr addrspace(1) %ptr7, align 1
+  %ptr8 = getelementptr i8, ptr addrspace(1) %out, i64 8
+  store i8 %s8, ptr addrspace(1) %ptr8, align 1
+  %ptr9 = getelementptr i8, ptr addrspace(1) %out, i64 9
+  store i8 %s9, ptr addrspace(1) %ptr9, align 1
+  ret void
+}
+
+; Negative test: should not combine if false value is not zero
+define amdgpu_kernel void @no_combine_non_zero_false(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @no_combine_non_zero_false(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[BUFFER_RESOURCE:%.*]], i32 [[OFFSET:%.*]], i1 [[VALID:%.*]]) #[[ATTR0]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[LOADED:%.*]] = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> [[BUFFER_RESOURCE]], i32 [[OFFSET]], i32 0, i32 0)
+; CHECK-OPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[LOADED]] to <16 x i8>
+; CHECK-OPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-OPT-NEXT:    [[S0:%.*]] = select i1 [[VALID]], i8 [[E0]], i8 1
+; CHECK-OPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @no_combine_non_zero_false(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[BUFFER_RESOURCE:%.*]], i32 [[OFFSET:%.*]], i1 [[VALID:%.*]]) #[[ATTR0]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[LOADED:%.*]] = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> [[BUFFER_RESOURCE]], i32 [[OFFSET]], i32 0, i32 0)
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[LOADED]] to <16 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[VALID]], i8 [[E0]], i8 1
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <4 x i32> %buffer_resource,
+  i32 %offset,
+  i1 %valid
+) {
+entry:
+  %loaded = call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %buffer_resource, i32 %offset, i32 0, i32 0)
+  %bytes = bitcast <4 x i32> %loaded to <16 x i8>
+  %e0 = extractelement <16 x i8> %bytes, i64 0
+  %s0 = select i1 %valid, i8 %e0, i8 1  ; false value is 1, not 0
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  ret void
+}
+
+; Negative test: too few selects (only 4 out of 16, less than half)
+define amdgpu_kernel void @no_combine_too_few_selects(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @no_combine_too_few_selects(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8>
+; CHECK-OPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-OPT-NEXT:    [[E1:%.*]] = extractelement <16 x i8> [[BYTES]], i64 1
+; CHECK-OPT-NEXT:    [[E2:%.*]] = extractelement <16 x i8> [[BYTES]], i64 2
+; CHECK-OPT-NEXT:    [[E3:%.*]] = extractelement <16 x i8> [[BYTES]], i64 3
+; CHECK-OPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 [[E0]], i8 0
+; CHECK-OPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 [[E1]], i8 0
+; CHECK-OPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 [[E2]], i8 0
+; CHECK-OPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 [[E3]], i8 0
+; CHECK-OPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-OPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-OPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-OPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-OPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @no_combine_too_few_selects(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <4 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <4 x i32> [[SRC]] to <16 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <16 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[E1:%.*]] = extractelement <16 x i8> [[BYTES]], i64 1
+; CHECK-NOOPT-NEXT:    [[E2:%.*]] = extractelement <16 x i8> [[BYTES]], i64 2
+; CHECK-NOOPT-NEXT:    [[E3:%.*]] = extractelement <16 x i8> [[BYTES]], i64 3
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 [[E0]], i8 0
+; CHECK-NOOPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 [[E1]], i8 0
+; CHECK-NOOPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 [[E2]], i8 0
+; CHECK-NOOPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 [[E3]], i8 0
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-NOOPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-NOOPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-NOOPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <4 x i32> %src,
+  i1 %cond
+) {
+entry:
+  %bytes = bitcast <4 x i32> %src to <16 x i8>
+  ; Only 4 selects - less than half of 16
+  %e0 = extractelement <16 x i8> %bytes, i64 0
+  %e1 = extractelement <16 x i8> %bytes, i64 1
+  %e2 = extractelement <16 x i8> %bytes, i64 2
+  %e3 = extractelement <16 x i8> %bytes, i64 3
+  %s0 = select i1 %cond, i8 %e0, i8 0
+  %s1 = select i1 %cond, i8 %e1, i8 0
+  %s2 = select i1 %cond, i8 %e2, i8 0
+  %s3 = select i1 %cond, i8 %e3, i8 0
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  ret void
+}
+
+; Negative test: select with extract as false value (wrong operand position)
+define amdgpu_kernel void @no_combine_wrong_operand_order(
+;
+; CHECK-OPT-LABEL: define amdgpu_kernel void @no_combine_wrong_operand_order(
+; CHECK-OPT-SAME: ptr addrspace(1) [[OUT:%.*]], <2 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-OPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-OPT-NEXT:    [[BYTES:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8>
+; CHECK-OPT-NEXT:    [[E0:%.*]] = extractelement <8 x i8> [[BYTES]], i64 0
+; CHECK-OPT-NEXT:    [[E1:%.*]] = extractelement <8 x i8> [[BYTES]], i64 1
+; CHECK-OPT-NEXT:    [[E2:%.*]] = extractelement <8 x i8> [[BYTES]], i64 2
+; CHECK-OPT-NEXT:    [[E3:%.*]] = extractelement <8 x i8> [[BYTES]], i64 3
+; CHECK-OPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 0, i8 [[E0]]
+; CHECK-OPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 0, i8 [[E1]]
+; CHECK-OPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 0, i8 [[E2]]
+; CHECK-OPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 0, i8 [[E3]]
+; CHECK-OPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-OPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-OPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-OPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-OPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-OPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-OPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-OPT-NEXT:    ret void
+;
+; CHECK-NOOPT-LABEL: define amdgpu_kernel void @no_combine_wrong_operand_order(
+; CHECK-NOOPT-SAME: ptr addrspace(1) [[OUT:%.*]], <2 x i32> [[SRC:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NOOPT-NEXT:  [[ENTRY:.*:]]
+; CHECK-NOOPT-NEXT:    [[BYTES:%.*]] = bitcast <2 x i32> [[SRC]] to <8 x i8>
+; CHECK-NOOPT-NEXT:    [[E0:%.*]] = extractelement <8 x i8> [[BYTES]], i64 0
+; CHECK-NOOPT-NEXT:    [[E1:%.*]] = extractelement <8 x i8> [[BYTES]], i64 1
+; CHECK-NOOPT-NEXT:    [[E2:%.*]] = extractelement <8 x i8> [[BYTES]], i64 2
+; CHECK-NOOPT-NEXT:    [[E3:%.*]] = extractelement <8 x i8> [[BYTES]], i64 3
+; CHECK-NOOPT-NEXT:    [[S0:%.*]] = select i1 [[COND]], i8 0, i8 [[E0]]
+; CHECK-NOOPT-NEXT:    [[S1:%.*]] = select i1 [[COND]], i8 0, i8 [[E1]]
+; CHECK-NOOPT-NEXT:    [[S2:%.*]] = select i1 [[COND]], i8 0, i8 [[E2]]
+; CHECK-NOOPT-NEXT:    [[S3:%.*]] = select i1 [[COND]], i8 0, i8 [[E3]]
+; CHECK-NOOPT-NEXT:    store i8 [[S0]], ptr addrspace(1) [[OUT]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR1:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 1
+; CHECK-NOOPT-NEXT:    store i8 [[S1]], ptr addrspace(1) [[PTR1]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR2:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 2
+; CHECK-NOOPT-NEXT:    store i8 [[S2]], ptr addrspace(1) [[PTR2]], align 1
+; CHECK-NOOPT-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr addrspace(1) [[OUT]], i64 3
+; CHECK-NOOPT-NEXT:    store i8 [[S3]], ptr addrspace(1) [[PTR3]], align 1
+; CHECK-NOOPT-NEXT:    ret void
+;
+  ptr addrspace(1) %out,
+  <2 x i32> %src,
+  i1 %cond
+) {
+entry:
+  %bytes = bitcast <2 x i32> %src to <8 x i8>
+  %e0 = extractelement <8 x i8> %bytes, i64 0
+  %e1 = extractelement <8 x i8> %bytes, i64 1
+  %e2 = extractelement <8 x i8> %bytes, i64 2
+  %e3 = extractelement <8 x i8> %bytes, i64 3
+  ; Extract is false value, 0 is true value - should not combine
+  %s0 = select i1 %cond, i8 0, i8 %e0
+  %s1 = select i1 %cond, i8 0, i8 %e1
+  %s2 = select i1 %cond, i8 0, i8 %e2
+  %s3 = select i1 %cond, i8 0, i8 %e3
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  ret void
+}
+
+declare <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg)
+

>From f7c2a62a81fc78d17830e2c55b92217dc698d2ce Mon Sep 17 00:00:00 2001
From: padivedi <padivedi at amd.com>
Date: Tue, 30 Dec 2025 20:12:56 +0530
Subject: [PATCH 2/2] added asm test file

---
 .../AMDGPU/combine-scalar-selects-asm.ll      | 268 ++++++++++++++++++
 1 file changed, 268 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/combine-scalar-selects-asm.ll

diff --git a/llvm/test/CodeGen/AMDGPU/combine-scalar-selects-asm.ll b/llvm/test/CodeGen/AMDGPU/combine-scalar-selects-asm.ll
new file mode 100644
index 0000000000000..0be2a7f95b11c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine-scalar-selects-asm.ll
@@ -0,0 +1,268 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck %s --check-prefix=CHECK-OPT
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-late-codegenprepare-combine-scalar-selects=false < %s | FileCheck %s --check-prefix=CHECK-NOOPT
+
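+; Check that with the combine enabled the selects operate on whole dwords
+; (4 v_cndmask_b32 for the v16i8 case), while with it disabled each byte is
+; unpacked, selected with its own v_cndmask_b32, and repacked.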
+define amdgpu_kernel void @combine_scalar_selects_v16i8(
+; CHECK-OPT-LABEL: combine_scalar_selects_v16i8:
+; CHECK-OPT:       ; %bb.0: ; %entry
+; CHECK-OPT-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-OPT-NEXT:    s_load_dword s6, s[4:5], 0x10
+; CHECK-OPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-OPT-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; CHECK-OPT-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-OPT-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-OPT-NEXT:    s_bitcmp1_b32 s6, 0
+; CHECK-OPT-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-OPT-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; CHECK-OPT-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
+; CHECK-OPT-NEXT:    s_endpgm
+;
+; CHECK-NOOPT-LABEL: combine_scalar_selects_v16i8:
+; CHECK-NOOPT:       ; %bb.0: ; %entry
+; CHECK-NOOPT-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NOOPT-NEXT:    s_load_dword s6, s[4:5], 0x10
+; CHECK-NOOPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NOOPT-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; CHECK-NOOPT-NEXT:    v_mov_b32_e32 v4, 0
+; CHECK-NOOPT-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NOOPT-NEXT:    global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-NOOPT-NEXT:    s_bitcmp1_b32 s6, 0
+; CHECK-NOOPT-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-NOOPT-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v5, 8, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v7, 24, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v8, 8, v1
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v10, 24, v1
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v11, 8, v2
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v13, 24, v2
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v14, 8, v3
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v16, 24, v3
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v9, 16, v1
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v12, 16, v2
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v15, 16, v3
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v10, 0, v10, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v11, 0, v11, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v13, 0, v13, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v14, 0, v14, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v16, 0, v16, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v9, 0, v9, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v12, 0, v12, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v15, 0, v15, vcc
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v14, 8, v14
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v16, 8, v16
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v11, 8, v11
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v13, 8, v13
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v8, 8, v8
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v10, 8, v10
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v5, 8, v5
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v7, 8, v7
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v3, v3, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v14, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v2, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v11, v12, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v8, v9, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v5, v6, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v3, v3, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v2, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
+; CHECK-NOOPT-NEXT:    s_endpgm
+  ptr addrspace(1) %in,
+  ptr addrspace(1) %out,
+  i1 %valid
+) {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = zext i32 %tid to i64
+  %gep = getelementptr <4 x i32>, ptr addrspace(1) %in, i64 %tid.ext
+  %loaded = load <4 x i32>, ptr addrspace(1) %gep, align 16
+  %bytes = bitcast <4 x i32> %loaded to <16 x i8>
+
+  %e0 = extractelement <16 x i8> %bytes, i64 0
+  %e1 = extractelement <16 x i8> %bytes, i64 1
+  %e2 = extractelement <16 x i8> %bytes, i64 2
+  %e3 = extractelement <16 x i8> %bytes, i64 3
+  %e4 = extractelement <16 x i8> %bytes, i64 4
+  %e5 = extractelement <16 x i8> %bytes, i64 5
+  %e6 = extractelement <16 x i8> %bytes, i64 6
+  %e7 = extractelement <16 x i8> %bytes, i64 7
+  %e8 = extractelement <16 x i8> %bytes, i64 8
+  %e9 = extractelement <16 x i8> %bytes, i64 9
+  %e10 = extractelement <16 x i8> %bytes, i64 10
+  %e11 = extractelement <16 x i8> %bytes, i64 11
+  %e12 = extractelement <16 x i8> %bytes, i64 12
+  %e13 = extractelement <16 x i8> %bytes, i64 13
+  %e14 = extractelement <16 x i8> %bytes, i64 14
+  %e15 = extractelement <16 x i8> %bytes, i64 15
+
+  %s0 = select i1 %valid, i8 %e0, i8 0
+  %s1 = select i1 %valid, i8 %e1, i8 0
+  %s2 = select i1 %valid, i8 %e2, i8 0
+  %s3 = select i1 %valid, i8 %e3, i8 0
+  %s4 = select i1 %valid, i8 %e4, i8 0
+  %s5 = select i1 %valid, i8 %e5, i8 0
+  %s6 = select i1 %valid, i8 %e6, i8 0
+  %s7 = select i1 %valid, i8 %e7, i8 0
+  %s8 = select i1 %valid, i8 %e8, i8 0
+  %s9 = select i1 %valid, i8 %e9, i8 0
+  %s10 = select i1 %valid, i8 %e10, i8 0
+  %s11 = select i1 %valid, i8 %e11, i8 0
+  %s12 = select i1 %valid, i8 %e12, i8 0
+  %s13 = select i1 %valid, i8 %e13, i8 0
+  %s14 = select i1 %valid, i8 %e14, i8 0
+  %s15 = select i1 %valid, i8 %e15, i8 0
+
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  %ptr4 = getelementptr i8, ptr addrspace(1) %out, i64 4
+  store i8 %s4, ptr addrspace(1) %ptr4, align 1
+  %ptr5 = getelementptr i8, ptr addrspace(1) %out, i64 5
+  store i8 %s5, ptr addrspace(1) %ptr5, align 1
+  %ptr6 = getelementptr i8, ptr addrspace(1) %out, i64 6
+  store i8 %s6, ptr addrspace(1) %ptr6, align 1
+  %ptr7 = getelementptr i8, ptr addrspace(1) %out, i64 7
+  store i8 %s7, ptr addrspace(1) %ptr7, align 1
+  %ptr8 = getelementptr i8, ptr addrspace(1) %out, i64 8
+  store i8 %s8, ptr addrspace(1) %ptr8, align 1
+  %ptr9 = getelementptr i8, ptr addrspace(1) %out, i64 9
+  store i8 %s9, ptr addrspace(1) %ptr9, align 1
+  %ptr10 = getelementptr i8, ptr addrspace(1) %out, i64 10
+  store i8 %s10, ptr addrspace(1) %ptr10, align 1
+  %ptr11 = getelementptr i8, ptr addrspace(1) %out, i64 11
+  store i8 %s11, ptr addrspace(1) %ptr11, align 1
+  %ptr12 = getelementptr i8, ptr addrspace(1) %out, i64 12
+  store i8 %s12, ptr addrspace(1) %ptr12, align 1
+  %ptr13 = getelementptr i8, ptr addrspace(1) %out, i64 13
+  store i8 %s13, ptr addrspace(1) %ptr13, align 1
+  %ptr14 = getelementptr i8, ptr addrspace(1) %out, i64 14
+  store i8 %s14, ptr addrspace(1) %ptr14, align 1
+  %ptr15 = getelementptr i8, ptr addrspace(1) %out, i64 15
+  store i8 %s15, ptr addrspace(1) %ptr15, align 1
+
+  ret void
+}
+
+; Test with v8i8 from v2i32 (smaller vector)
+define amdgpu_kernel void @combine_scalar_selects_v8i8(
+; CHECK-OPT-LABEL: combine_scalar_selects_v8i8:
+; CHECK-OPT:       ; %bb.0: ; %entry
+; CHECK-OPT-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-OPT-NEXT:    s_load_dword s6, s[4:5], 0x10
+; CHECK-OPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-OPT-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; CHECK-OPT-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-OPT-NEXT:    global_load_dwordx2 v[0:1], v0, s[0:1]
+; CHECK-OPT-NEXT:    s_bitcmp1_b32 s6, 0
+; CHECK-OPT-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-OPT-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; CHECK-OPT-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; CHECK-OPT-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; CHECK-OPT-NEXT:    s_endpgm
+;
+; CHECK-NOOPT-LABEL: combine_scalar_selects_v8i8:
+; CHECK-NOOPT:       ; %bb.0: ; %entry
+; CHECK-NOOPT-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NOOPT-NEXT:    s_load_dword s6, s[4:5], 0x10
+; CHECK-NOOPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NOOPT-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; CHECK-NOOPT-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NOOPT-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NOOPT-NEXT:    global_load_dwordx2 v[0:1], v0, s[0:1]
+; CHECK-NOOPT-NEXT:    s_bitcmp1_b32 s6, 0
+; CHECK-NOOPT-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-NOOPT-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v3, 8, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v5, 24, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v6, 8, v1
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v8, 24, v1
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; CHECK-NOOPT-NEXT:    v_lshrrev_b32_e32 v7, 16, v1
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v5, 0, v5, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v6, 0, v6, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v8, 0, v8, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
+; CHECK-NOOPT-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v6, 8, v6
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v8, 8, v8
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v3, 8, v3
+; CHECK-NOOPT-NEXT:    v_lshlrev_b16_e32 v5, 8, v5
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v6, v7, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v3, v4, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; CHECK-NOOPT-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; CHECK-NOOPT-NEXT:    s_endpgm
+  ptr addrspace(1) %in,
+  ptr addrspace(1) %out,
+  i1 %cond
+) {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = zext i32 %tid to i64
+  %gep = getelementptr <2 x i32>, ptr addrspace(1) %in, i64 %tid.ext
+  %loaded = load <2 x i32>, ptr addrspace(1) %gep, align 8
+  %bytes = bitcast <2 x i32> %loaded to <8 x i8>
+  %e0 = extractelement <8 x i8> %bytes, i64 0
+  %e1 = extractelement <8 x i8> %bytes, i64 1
+  %e2 = extractelement <8 x i8> %bytes, i64 2
+  %e3 = extractelement <8 x i8> %bytes, i64 3
+  %e4 = extractelement <8 x i8> %bytes, i64 4
+  %e5 = extractelement <8 x i8> %bytes, i64 5
+  %e6 = extractelement <8 x i8> %bytes, i64 6
+  %e7 = extractelement <8 x i8> %bytes, i64 7
+  %s0 = select i1 %cond, i8 %e0, i8 0
+  %s1 = select i1 %cond, i8 %e1, i8 0
+  %s2 = select i1 %cond, i8 %e2, i8 0
+  %s3 = select i1 %cond, i8 %e3, i8 0
+  %s4 = select i1 %cond, i8 %e4, i8 0
+  %s5 = select i1 %cond, i8 %e5, i8 0
+  %s6 = select i1 %cond, i8 %e6, i8 0
+  %s7 = select i1 %cond, i8 %e7, i8 0
+  store i8 %s0, ptr addrspace(1) %out, align 1
+  %ptr1 = getelementptr i8, ptr addrspace(1) %out, i64 1
+  store i8 %s1, ptr addrspace(1) %ptr1, align 1
+  %ptr2 = getelementptr i8, ptr addrspace(1) %out, i64 2
+  store i8 %s2, ptr addrspace(1) %ptr2, align 1
+  %ptr3 = getelementptr i8, ptr addrspace(1) %out, i64 3
+  store i8 %s3, ptr addrspace(1) %ptr3, align 1
+  %ptr4 = getelementptr i8, ptr addrspace(1) %out, i64 4
+  store i8 %s4, ptr addrspace(1) %ptr4, align 1
+  %ptr5 = getelementptr i8, ptr addrspace(1) %out, i64 5
+  store i8 %s5, ptr addrspace(1) %ptr5, align 1
+  %ptr6 = getelementptr i8, ptr addrspace(1) %out, i64 6
+  store i8 %s6, ptr addrspace(1) %ptr6, align 1
+  %ptr7 = getelementptr i8, ptr addrspace(1) %out, i64 7
+  store i8 %s7, ptr addrspace(1) %ptr7, align 1
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()


