[clang] [llvm] [AMDGPU][WIP] Add support for i64/f64 readlane, writelane and readfirstlane operations. (PR #89217)

Vikram Hegde via cfe-commits cfe-commits at lists.llvm.org
Sun May 5 22:47:02 PDT 2024


https://github.com/vikramRH updated https://github.com/llvm/llvm-project/pull/89217

>From aa4e757ad3b14ab132a9cc3a0e50b8e4a00e89aa Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Wed, 10 Apr 2024 11:53:16 +0000
Subject: [PATCH 01/11] [AMDGPU] add support for i64 readlane

---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td  |  2 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 43 +++++++++++++++++++++++
 llvm/lib/Target/AMDGPU/SIInstructions.td  |  8 +++++
 3 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index ee9a5d7a343980..45e9c10f2306cd 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2184,7 +2184,7 @@ def int_amdgcn_readfirstlane :
 // current wave. Otherwise, the result is undefined.
 def int_amdgcn_readlane :
   ClangBuiltin<"__builtin_amdgcn_readlane">,
-  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+  Intrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty],
             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
 
 // The value to write and lane select arguments must be uniform across the
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0c706d51cb665f..aa8389f6960445 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5065,6 +5065,49 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MI.eraseFromParent();
     return BB;
   }
+  case AMDGPU::V_READLANE_PSEUDO_B64: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
+    const DebugLoc &DL = MI.getDebugLoc();
+
+    MachineOperand &Dest = MI.getOperand(0);
+    MachineOperand &Src0 = MI.getOperand(1);
+    MachineOperand &Src1 = MI.getOperand(2);
+
+    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+
+    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+    const TargetRegisterClass *Src1RC = Src1.isReg()
+                                        ? MRI.getRegClass(Src1.getReg())
+                                        : &AMDGPU::SReg_32RegClass;
+    
+    const TargetRegisterClass *Src0SubRC =
+        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
+    
+    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+
+    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+    
+    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
+                           .add(SrcReg0Sub0)
+                           .add(Src1);
+    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
+                           .add(SrcReg0Sub1)
+                           .add(Src1);
+
+    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+          .addReg(DestSub0)
+          .addImm(AMDGPU::sub0)
+          .addReg(DestSub1)
+          .addImm(AMDGPU::sub1);
+
+    MI.eraseFromParent();
+    return BB;
+  }
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d6d49889656bbc..0f02b4ce518ac4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -288,6 +288,14 @@ def V_SUB_U64_PSEUDO : VPseudoInstSI <
 >;
 } // End usesCustomInserter = 1, Defs = [VCC]
 
+
+let usesCustomInserter = 1 in {
+  def V_READLANE_PSEUDO_B64 : VPseudoInstSI <
+  (outs SReg_64:$sdst), (ins VReg_64:$src0, SSrc_b32:$src1),
+  [(set i64:$sdst, (int_amdgcn_readlane i64:$src0, i32:$src1))]
+  >;
+}
+
 let usesCustomInserter = 1, Defs = [SCC] in {
 def S_ADD_U64_PSEUDO : SPseudoInstSI <
   (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1),

>From b895dd5861bfa9781eb8990c86edd11d466c48f3 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Fri, 12 Apr 2024 10:27:42 +0000
Subject: [PATCH 02/11] add support for i64 readfirstlane and writelane
 intrinsics

---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  10 +-
 .../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp   |  20 +--
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  95 ++++++++++++++
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  12 +-
 llvm/lib/Target/AMDGPU/VOP1Instructions.td    |   2 +-
 .../UniformityAnalysis/AMDGPU/intrinsics.ll   |   6 +-
 .../atomic_optimizations_mul_one.ll           |  13 +-
 .../test/CodeGen/AMDGPU/global-atomic-scan.ll |  48 +++----
 .../AMDGPU/global_atomic_optimizer_fp_rtn.ll  | 120 +++++++++---------
 .../AMDGPU/global_atomics_iterative_scan.ll   |   2 +-
 .../global_atomics_iterative_scan_fp.ll       |   8 +-
 .../global_atomics_optimizer_fp_no_rtn.ll     |  24 ++--
 .../InstCombine/AMDGPU/amdgcn-intrinsics.ll   |  32 ++---
 13 files changed, 248 insertions(+), 144 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 45e9c10f2306cd..34feee1c56be82 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2177,7 +2177,7 @@ def int_amdgcn_wave_reduce_umax : AMDGPUWaveReduce;
 
 def int_amdgcn_readfirstlane :
   ClangBuiltin<"__builtin_amdgcn_readfirstlane">,
-  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+  Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
 
 // The lane argument must be uniform across the currently active threads of the
@@ -2192,10 +2192,10 @@ def int_amdgcn_readlane :
 // undefined.
 def int_amdgcn_writelane :
   ClangBuiltin<"__builtin_amdgcn_writelane">,
-  Intrinsic<[llvm_i32_ty], [
-    llvm_i32_ty,    // uniform value to write: returned by the selected lane
-    llvm_i32_ty,    // uniform lane select
-    llvm_i32_ty     // returned by all lanes other than the selected one
+  Intrinsic<[llvm_any_ty], [
+    LLVMMatchType<0>,    // uniform value to write: returned by the selected lane
+    llvm_i32_ty,        // uniform lane select
+    LLVMMatchType<0>     // returned by all lanes other than the selected one
   ],
   [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
 >;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index dbb3de76b4ddae..5b3fa148e56192 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -433,7 +433,7 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
   // Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
   // combine them with a scalar operation.
   Function *ReadLane =
-      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
+      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
   V = B.CreateBitCast(V, IntNTy);
   Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
   Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
@@ -493,7 +493,7 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
     if (!ST->isWave32()) {
       // Combine lane 31 into lanes 32..63.
       V = B.CreateBitCast(V, IntNTy);
-      Value *const Lane31 = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
+      Value *const Lane31 = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, B.getInt32Ty(),
                                               {V, B.getInt32(31)});
 
       Value *UpdateDPPCall = B.CreateCall(
@@ -524,9 +524,9 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                       B.getInt32(0xf), B.getFalse()});
   } else {
     Function *ReadLane =
-        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, {});
+        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
     Function *WriteLane =
-        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, {});
+        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, B.getInt32Ty());
 
     // On GFX10 all DPP operations are confined to a single row. To get cross-
     // row operations we have to use permlane or readlane.
@@ -599,7 +599,7 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
   // Get the value required for atomic operation
   V = B.CreateBitCast(V, IntNTy);
   Value *LaneValue =
-      B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {}, {V, LaneIdxInt});
+      B.CreateIntrinsic(Intrinsic::amdgcn_readlane, B.getInt32Ty(), {V, LaneIdxInt});
   LaneValue = B.CreateBitCast(LaneValue, Ty);
 
   // Perform writelane if intermediate scan results are required later in the
@@ -607,7 +607,7 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
   Value *OldValue = nullptr;
   if (NeedResult) {
     OldValue =
-        B.CreateIntrinsic(Intrinsic::amdgcn_writelane, {},
+        B.CreateIntrinsic(Intrinsic::amdgcn_writelane, B.getInt32Ty(),
                           {B.CreateBitCast(Accumulator, IntNTy), LaneIdxInt,
                            B.CreateBitCast(OldValuePhi, IntNTy)});
     OldValue = B.CreateBitCast(OldValue, Ty);
@@ -789,7 +789,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
         Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
         assert(TyBitWidth == 32);
         NewV = B.CreateBitCast(NewV, IntNTy);
-        NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, {},
+        NewV = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, B.getInt32Ty(),
                                  {NewV, LastLaneIdx});
         NewV = B.CreateBitCast(NewV, Ty);
       }
@@ -926,9 +926,9 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
       Value *const ExtractHi =
           B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
       CallInst *const ReadFirstLaneLo =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
+          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractLo);
       CallInst *const ReadFirstLaneHi =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
+          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractHi);
       Value *const PartialInsert = B.CreateInsertElement(
           PoisonValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
       Value *const Insert =
@@ -937,7 +937,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
     } else if (TyBitWidth == 32) {
       Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
       BroadcastI =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, CastedPhi);
+          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, CastedPhi);
       BroadcastI = B.CreateBitCast(BroadcastI, Ty);
 
     } else {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index aa8389f6960445..4ae17ccfe80674 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5108,6 +5108,101 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MI.eraseFromParent();
     return BB;
   }
+  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
+    const DebugLoc &DL = MI.getDebugLoc();
+
+    MachineOperand &Dest = MI.getOperand(0);
+    MachineOperand &Src0 = MI.getOperand(1);
+
+    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+
+    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+
+    const TargetRegisterClass *Src0SubRC =
+        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
+    
+    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+
+    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+
+    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
+                           .add(SrcReg0Sub0);
+    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
+                           .add(SrcReg0Sub1);
+    
+    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+          .addReg(DestSub0)
+          .addImm(AMDGPU::sub0)
+          .addReg(DestSub1)
+          .addImm(AMDGPU::sub1);
+
+    MI.eraseFromParent();
+    return BB;
+  }
+  case AMDGPU::V_WRITELANE_PSEUDO_B64: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
+    const DebugLoc &DL = MI.getDebugLoc();
+
+    MachineOperand &Dest = MI.getOperand(0);
+    MachineOperand &Src0 = MI.getOperand(1);
+    MachineOperand &Src1 = MI.getOperand(2);
+    MachineOperand &Src2 = MI.getOperand(3);
+
+    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+    const TargetRegisterClass *Src1RC = Src1.isReg()
+                                        ? MRI.getRegClass(Src1.getReg())
+                                        : &AMDGPU::SReg_32RegClass;
+    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());                                     
+    
+    const TargetRegisterClass *Src0SubRC =
+        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
+    
+    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+
+    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+
+    
+    const TargetRegisterClass *Src2SubRC =
+        TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+    
+    MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
+
+    MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);        
+    
+    
+    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
+                           .add(SrcReg0Sub0)
+                           .add(Src1)
+                           .add(SrcReg2Sub0);
+    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
+                           .add(SrcReg0Sub1)
+                           .add(Src1)
+                           .add(SrcReg2Sub1);
+    
+    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+          .addReg(DestSub0)
+          .addImm(AMDGPU::sub0)
+          .addReg(DestSub1)
+          .addImm(AMDGPU::sub1);
+
+    MI.eraseFromParent();
+    return BB;   
+  }
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 0f02b4ce518ac4..decd59be5eb29d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -294,6 +294,16 @@ let usesCustomInserter = 1 in {
   (outs SReg_64:$sdst), (ins VReg_64:$src0, SSrc_b32:$src1),
   [(set i64:$sdst, (int_amdgcn_readlane i64:$src0, i32:$src1))]
   >;
+
+  def V_READFIRSTLANE_PSEUDO_B64 : VPseudoInstSI <
+  (outs SReg_64:$sdst), (ins VReg_64:$src0),
+  [(set i64:$sdst, (int_amdgcn_readfirstlane i64:$src0))]
+  >;
+  
+  def V_WRITELANE_PSEUDO_B64 : VPseudoInstSI <
+  (outs VReg_64:$sdst), (ins SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2),
+  [(set i64:$sdst, (int_amdgcn_writelane i64:$src0, i32:$src1, i64:$src2))]
+  >;
 }
 
 let usesCustomInserter = 1, Defs = [SCC] in {
@@ -3413,7 +3423,7 @@ def : GCNPat<
 // FIXME: Should also do this for readlane, but tablegen crashes on
 // the ignored src1.
 def : GCNPat<
-  (int_amdgcn_readfirstlane (i32 imm:$src)),
+  (i32 (int_amdgcn_readfirstlane (i32 imm:$src))),
   (S_MOV_B32 SReg_32:$src)
 >;
 
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 2341e0d9d32bb4..0ee80f45c91603 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -112,7 +112,7 @@ class getVOP1Pat <SDPatternOperator node, VOPProfile P> : LetDummies {
         !if(P.HasOMod,
             [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3OMods P.Src0VT:$src0,
                                                   i1:$clamp, i32:$omod))))],
-            [(set P.DstVT:$vdst, (node P.Src0RC32:$src0))]
+            [(set P.DstVT:$vdst, (node (P.Src0VT P.Src0RC32:$src0)))]
         )
     );
 }
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index 26c85e83b53adc..74d2f53d7b317c 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -56,9 +56,9 @@ define amdgpu_kernel void @mov_dpp8(ptr addrspace(1) %out, i32 %in) #0 {
   ret void
 }
 
-; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
+; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.writelane.i32(i32 0, i32 1, i32 2)
 define amdgpu_kernel void @writelane(ptr addrspace(1) %out) #0 {
-  %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
+  %tmp0 = call i32 @llvm.amdgcn.writelane.i32(i32 0, i32 1, i32 2)
   store i32 %tmp0, ptr addrspace(1) %out
   ret void
 }
@@ -237,7 +237,7 @@ declare i32 @llvm.amdgcn.permlanex16.var(i32, i32, i32, i1, i1) #1
 declare i32 @llvm.amdgcn.mov.dpp.i32(i32, i32, i32, i32, i1) #1
 declare i32 @llvm.amdgcn.mov.dpp8.i32(i32, i32) #1
 declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #1
-declare i32 @llvm.amdgcn.writelane(i32, i32, i32) #1
+declare i32 @llvm.amdgcn.writelane.i32(i32, i32, i32) #1
 declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v16f16(<16 x half>, <16 x half> , <8 x float>) #1
 declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v16i16(<16 x i16>, <16 x i16> , <8 x float>) #1
 declare <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v16f16.v16f16(<16 x half>, <16 x half> , <16 x half>, i1 immarg) #1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
index 220dc70165e87c..bdfafa89cd0477 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
@@ -1,5 +1,4 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-atomic-optimizer %s | FileCheck -check-prefix=IR %s
 ; RUN: llc -global-isel -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
@@ -74,7 +73,7 @@ define amdgpu_cs void @atomic_add_and_format(<4 x i32> inreg %arg) {
 ; IR-NEXT:    br label [[TMP11]]
 ; IR:       11:
 ; IR-NEXT:    [[TMP12:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP10]], [[TMP9]] ]
-; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP12]])
+; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP12]])
 ; IR-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP5]]
 ; IR-NEXT:    call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> [[ARG]], <4 x i32> [[ARG]], i32 [[TMP14]], i32 0, i32 0, i32 0)
 ; IR-NEXT:    ret void
@@ -172,7 +171,7 @@ define amdgpu_cs void @atomic_sub_and_format(<4 x i32> inreg %arg) {
 ; IR-NEXT:    br label [[TMP11]]
 ; IR:       11:
 ; IR-NEXT:    [[TMP12:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP10]], [[TMP9]] ]
-; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP12]])
+; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP12]])
 ; IR-NEXT:    [[TMP14:%.*]] = sub i32 [[TMP13]], [[TMP5]]
 ; IR-NEXT:    call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> [[ARG]], <4 x i32> [[ARG]], i32 [[TMP14]], i32 0, i32 0, i32 0)
 ; IR-NEXT:    ret void
@@ -273,7 +272,7 @@ define amdgpu_cs void @atomic_xor_and_format(<4 x i32> inreg %arg) {
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = and i32 [[TMP5]], 1
 ; IR-NEXT:    [[TMP16:%.*]] = xor i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> [[ARG]], <4 x i32> [[ARG]], i32 [[TMP16]], i32 0, i32 0, i32 0)
@@ -374,7 +373,7 @@ define amdgpu_cs void @atomic_ptr_add_and_format(ptr addrspace(8) inreg %arg) {
 ; IR-NEXT:    br label [[TMP11]]
 ; IR:       11:
 ; IR-NEXT:    [[TMP12:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP10]], [[TMP9]] ]
-; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP12]])
+; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP12]])
 ; IR-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP5]]
 ; IR-NEXT:    [[ARG_INT:%.*]] = ptrtoint ptr addrspace(8) [[ARG]] to i128
 ; IR-NEXT:    [[ARG_VEC:%.*]] = bitcast i128 [[ARG_INT]] to <4 x i32>
@@ -476,7 +475,7 @@ define amdgpu_cs void @atomic_ptr_sub_and_format(ptr addrspace(8) inreg %arg) {
 ; IR-NEXT:    br label [[TMP11]]
 ; IR:       11:
 ; IR-NEXT:    [[TMP12:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP10]], [[TMP9]] ]
-; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP12]])
+; IR-NEXT:    [[TMP13:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP12]])
 ; IR-NEXT:    [[TMP14:%.*]] = sub i32 [[TMP13]], [[TMP5]]
 ; IR-NEXT:    [[ARG_INT:%.*]] = ptrtoint ptr addrspace(8) [[ARG]] to i128
 ; IR-NEXT:    [[ARG_VEC:%.*]] = bitcast i128 [[ARG_INT]] to <4 x i32>
@@ -581,7 +580,7 @@ define amdgpu_cs void @atomic_ptr_xor_and_format(ptr addrspace(8) inreg %arg) {
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[DOTENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = and i32 [[TMP5]], 1
 ; IR-NEXT:    [[TMP16:%.*]] = xor i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    [[ARG_INT:%.*]] = ptrtoint ptr addrspace(8) [[ARG]] to i128
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomic-scan.ll b/llvm/test/CodeGen/AMDGPU/global-atomic-scan.ll
index 6b47f81bccb71e..6c61c837881c45 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomic-scan.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomic-scan.ll
@@ -130,7 +130,7 @@ define amdgpu_kernel void @atomic_add_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -193,7 +193,7 @@ define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(ptr addrspace(1) %ou
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -251,7 +251,7 @@ define amdgpu_kernel void @atomic_add_i32_ret(ptr addrspace(1) %out, ptr addrspa
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -310,7 +310,7 @@ define amdgpu_kernel void @atomic_add_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = add i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -364,7 +364,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -1, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = and i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    store i32 [[TMP13]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -421,7 +421,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(ptr addrspace(1) %ou
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -1, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = and i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    store i32 [[TMP13]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -473,7 +473,7 @@ define amdgpu_kernel void @atomic_and_i32_ret(ptr addrspace(1) %out, ptr addrspa
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -1, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = and i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    store i32 [[TMP13]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -526,7 +526,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -1, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = and i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    store i32 [[TMP13]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -586,7 +586,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -649,7 +649,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(ptr addrspace(1) %ou
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -707,7 +707,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret(ptr addrspace(1) %out, ptr addrspa
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -766,7 +766,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP12]]
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP13]])
+; IR-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP13]])
 ; IR-NEXT:    [[TMP15:%.*]] = mul i32 [[IN]], [[TMP5]]
 ; IR-NEXT:    [[TMP16:%.*]] = sub i32 [[TMP14]], [[TMP15]]
 ; IR-NEXT:    store i32 [[TMP16]], ptr addrspace(1) [[OUT2:%.*]], align 4
@@ -820,7 +820,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -2147483648, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -878,7 +878,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %ou
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -2147483648, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -931,7 +931,7 @@ define amdgpu_kernel void @atomic_max_i32_ret(ptr addrspace(1) %out, ptr addrspa
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -2147483648, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -985,7 +985,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 -2147483648, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1040,7 +1040,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 0, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1098,7 +1098,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %o
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 0, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1151,7 +1151,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret(ptr addrspace(1) %out, ptr addrsp
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 0, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1205,7 +1205,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 0, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1260,7 +1260,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 2147483647, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1318,7 +1318,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %ou
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 2147483647, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1371,7 +1371,7 @@ define amdgpu_kernel void @atomic_min_i32_ret(ptr addrspace(1) %out, ptr addrspa
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 2147483647, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
@@ -1425,7 +1425,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr
 ; IR-NEXT:    br label [[TMP9]]
 ; IR:       9:
 ; IR-NEXT:    [[TMP10:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[TMP8]], [[TMP7]] ]
-; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP10]])
+; IR-NEXT:    [[TMP11:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP10]])
 ; IR-NEXT:    [[TMP12:%.*]] = select i1 [[TMP6]], i32 2147483647, i32 [[IN]]
 ; IR-NEXT:    [[TMP13:%.*]] = icmp slt i32 [[TMP11]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index b71728096093cd..baaf50377338c6 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -29,7 +29,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_agent_scope_uns
 ; IR:       16:
 ; IR-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]])
+; IR-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]])
 ; IR-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-NEXT:    [[TMP21:%.*]] = uitofp i32 [[TMP8]] to float
 ; IR-NEXT:    [[TMP22:%.*]] = fmul float [[VAL]], [[TMP21]]
@@ -62,7 +62,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]])
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = fadd float [[TMP16]], [[TMP28:%.*]]
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -76,11 +76,11 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]])
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]])
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]])
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]])
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = fadd float [[ACCUMULATOR]], [[TMP24]]
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -120,7 +120,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = fadd float [[TMP22]], [[TMP23]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63)
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63)
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]])
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -131,7 +131,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]])
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]])
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]])
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = fadd float [[TMP37]], [[TMP38]]
@@ -167,7 +167,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
 ; IR-ITERATIVE:       16:
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -199,7 +199,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
 ; IR-DPP:       16:
 ; IR-DPP-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-DPP-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -232,7 +232,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -246,11 +246,11 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -290,7 +290,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -301,7 +301,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -337,7 +337,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
 ; IR-ITERATIVE:       16:
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -369,7 +369,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
 ; IR-DPP:       16:
 ; IR-DPP-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-DPP-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -402,7 +402,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -416,11 +416,11 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -460,7 +460,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -471,7 +471,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -503,7 +503,7 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_uni_value_agent_scope_uns
 ; IR:       12:
 ; IR-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
 ; IR-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
+; IR-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]])
 ; IR-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-NEXT:    [[TMP17:%.*]] = uitofp i32 [[TMP8]] to float
 ; IR-NEXT:    [[TMP18:%.*]] = select i1 [[TMP9]], float 0x7FF0000000000000, float [[VAL]]
@@ -536,7 +536,7 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]])
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]])
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.minnum.f32(float [[TMP16]], float [[TMP28:%.*]])
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -550,11 +550,11 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]])
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]])
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]])
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]])
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = call float @llvm.minnum.f32(float [[ACCUMULATOR]], float [[TMP24]])
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -594,7 +594,7 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.minnum.f32(float [[TMP22]], float [[TMP23]])
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF0000000000000, float [[TMP24]], i32 312, i32 15, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63)
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63)
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]])
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -605,7 +605,7 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]])
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]])
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]])
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = call float @llvm.minnum.f32(float [[TMP37]], float [[TMP38]])
@@ -637,7 +637,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = select i1 [[TMP9]], float 0xFFF0000000000000, float [[VAL]]
@@ -665,7 +665,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
 ; IR-DPP:       12:
 ; IR-DPP-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
 ; IR-DPP-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-DPP-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-DPP-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP18:%.*]] = select i1 [[TMP9]], float 0xFFF0000000000000, float [[VAL]]
@@ -698,7 +698,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -712,11 +712,11 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = call float @llvm.experimental.constrained.maxnum.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -756,7 +756,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP22]], float [[TMP23]], metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0xFFF0000000000000, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -767,7 +767,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP37]], float [[TMP38]], metadata !"fpexcept.strict") #[[ATTR8]]
@@ -803,7 +803,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
 ; IR-ITERATIVE:       16:
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -835,7 +835,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
 ; IR-DPP:       16:
 ; IR-DPP-NEXT:    [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
 ; IR-DPP-NEXT:    [[TMP18:%.*]] = bitcast float [[TMP17]] to i32
-; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP18]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP18]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = bitcast i32 [[TMP19]] to float
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -868,7 +868,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
 ; IR-ITERATIVE:       12:
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = bitcast float [[TMP13]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP14]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP14]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast i32 [[TMP15]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP16]], float [[TMP28:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    br label [[TMP18]]
@@ -882,11 +882,11 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP22]], i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = bitcast i32 [[TMP23]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = bitcast float [[ACCUMULATOR]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast float [[OLDVALUEPHI]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.writelane.i32(i32 [[TMP25]], i32 [[TMP21]], i32 [[TMP26]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP28]] = bitcast i32 [[TMP27]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP29]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP24]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP30:%.*]] = shl i64 1, [[TMP20]]
@@ -926,7 +926,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP24]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP26]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP27:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP26]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = bitcast i32 [[TMP27]] to float
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP28]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP30:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -937,7 +937,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
 ; IR-DPP:       33:
 ; IR-DPP-NEXT:    [[TMP34:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP32]], [[TMP31]] ]
 ; IR-DPP-NEXT:    [[TMP35:%.*]] = bitcast float [[TMP34]] to i32
-; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP35]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP36:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP35]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP37:%.*]] = bitcast i32 [[TMP36]] to float
 ; IR-DPP-NEXT:    [[TMP38:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP39:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP37]], float [[TMP38]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1084,8 +1084,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_s
 ; IR-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]])
-; IR-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]])
+; IR-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]])
+; IR-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]])
 ; IR-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1136,8 +1136,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
 ; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1174,8 +1174,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
 ; IR-DPP-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1226,8 +1226,8 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
 ; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1264,8 +1264,8 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
 ; IR-DPP-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1312,8 +1312,8 @@ define amdgpu_ps double @global_atomic_fmin_double_uni_address_uni_value_agent_s
 ; IR-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-NEXT:    [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
 ; IR-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
-; IR-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]])
-; IR-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]])
+; IR-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP15]])
+; IR-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP17]])
 ; IR-NEXT:    [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
 ; IR-NEXT:    [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
 ; IR-NEXT:    [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
@@ -1360,8 +1360,8 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP17]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
 ; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
@@ -1394,8 +1394,8 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
 ; IR-DPP-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-DPP-NEXT:    [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
 ; IR-DPP-NEXT:    [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
-; IR-DPP-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR8]]
-; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP15]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP17]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
@@ -1446,8 +1446,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
 ; IR-ITERATIVE-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-ITERATIVE-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-ITERATIVE-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-ITERATIVE-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
@@ -1484,8 +1484,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
 ; IR-DPP-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
 ; IR-DPP-NEXT:    [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
-; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
-; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP21]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan.ll
index f954560d0f5ca9..4b4c99b3cd14c8 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan.ll
@@ -83,7 +83,7 @@ define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out, ptr addrspace(
 ; IR-NEXT:    [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP6]], [[ENTRY]] ], [ [[TMP16:%.*]], [[COMPUTELOOP]] ]
 ; IR-NEXT:    [[TMP10:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-NEXT:    [[TMP11:%.*]] = trunc i64 [[TMP10]] to i32
-; IR-NEXT:    [[TMP12:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[VALUE]], i32 [[TMP11]])
+; IR-NEXT:    [[TMP12:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[VALUE]], i32 [[TMP11]])
 ; IR-NEXT:    [[TMP13]] = add i32 [[ACCUMULATOR]], [[TMP12]]
 ; IR-NEXT:    [[TMP14:%.*]] = shl i64 1, [[TMP10]]
 ; IR-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], -1
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
index 86e3d9338e0780..38823681d1bb56 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
@@ -69,7 +69,7 @@ define amdgpu_kernel void @global_atomic_fadd_div_value(ptr addrspace(1) %ptr) #
 ; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = bitcast float [[DIVVALUE]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP13]], i32 [[TMP12]])
+; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP13]], i32 [[TMP12]])
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = bitcast i32 [[TMP14]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP16]] = fadd float [[ACCUMULATOR]], [[TMP15]]
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = shl i64 1, [[TMP11]]
@@ -107,7 +107,7 @@ define amdgpu_kernel void @global_atomic_fadd_div_value(ptr addrspace(1) %ptr) #
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 323, i32 12, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = fadd float [[TMP20]], [[TMP21]]
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = bitcast float [[TMP22]] to i32
-; IR-DPP-NEXT:    [[TMP24:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP23]], i32 63)
+; IR-DPP-NEXT:    [[TMP24:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP23]], i32 63)
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast i32 [[TMP24]] to float
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]])
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = icmp eq i32 [[TMP6]], 0
@@ -191,7 +191,7 @@ define amdgpu_kernel void @global_atomic_fsub_div_value(ptr addrspace(1) %ptr) #
 ; IR-ITERATIVE-NEXT:    [[TMP11:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP13:%.*]] = bitcast float [[DIVVALUE]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP13]], i32 [[TMP12]])
+; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP13]], i32 [[TMP12]])
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = bitcast i32 [[TMP14]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP16]] = fadd float [[ACCUMULATOR]], [[TMP15]]
 ; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = shl i64 1, [[TMP11]]
@@ -229,7 +229,7 @@ define amdgpu_kernel void @global_atomic_fsub_div_value(ptr addrspace(1) %ptr) #
 ; IR-DPP-NEXT:    [[TMP21:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP20]], i32 323, i32 12, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP22:%.*]] = fadd float [[TMP20]], [[TMP21]]
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = bitcast float [[TMP22]] to i32
-; IR-DPP-NEXT:    [[TMP24:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP23]], i32 63)
+; IR-DPP-NEXT:    [[TMP24:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP23]], i32 63)
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast i32 [[TMP24]] to float
 ; IR-DPP-NEXT:    [[TMP26:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP25]])
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = icmp eq i32 [[TMP6]], 0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index b9234f47df1928..83453354320fee 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -61,7 +61,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_scope_agent_scop
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]])
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]])
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = fadd float [[ACCUMULATOR]], [[TMP18]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -100,7 +100,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_scope_agent_scop
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = fadd float [[TMP22]], [[TMP23]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63)
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63)
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]])
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -196,7 +196,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -235,7 +235,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -331,7 +331,7 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -370,7 +370,7 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -438,7 +438,7 @@ define amdgpu_ps void @global_atomic_fmin_uni_address_div_value_agent_scope_unsa
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]])
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]])
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = call float @llvm.minnum.f32(float [[ACCUMULATOR]], float [[TMP18]])
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -477,7 +477,7 @@ define amdgpu_ps void @global_atomic_fmin_uni_address_div_value_agent_scope_unsa
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF0000000000000, float [[TMP22]], i32 323, i32 12, i32 15, i1 false)
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.minnum.f32(float [[TMP22]], float [[TMP23]])
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63)
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63)
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]])
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -565,7 +565,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = call float @llvm.experimental.constrained.maxnum.f32(float [[ACCUMULATOR]], float [[TMP18]], metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -604,7 +604,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0xFFF0000000000000, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP22]], float [[TMP23]], metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
@@ -700,7 +700,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
 ; IR-ITERATIVE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
 ; IR-ITERATIVE-NEXT:    [[TMP16:%.*]] = bitcast float [[VAL:%.*]] to i32
-; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT:    [[TMP17:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP16]], i32 [[TMP15]]) #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP18:%.*]] = bitcast i32 [[TMP17]] to float
 ; IR-ITERATIVE-NEXT:    [[TMP19]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
 ; IR-ITERATIVE-NEXT:    [[TMP20:%.*]] = shl i64 1, [[TMP14]]
@@ -739,7 +739,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
 ; IR-DPP-NEXT:    [[TMP23:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP22]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP24:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP22]], float [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP25:%.*]] = bitcast float [[TMP24]] to i32
-; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[TMP25]], i32 63) #[[ATTR8]]
+; IR-DPP-NEXT:    [[TMP26:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TMP25]], i32 63) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP27:%.*]] = bitcast i32 [[TMP26]] to float
 ; IR-DPP-NEXT:    [[TMP28:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP27]]) #[[ATTR8]]
 ; IR-DPP-NEXT:    [[TMP29:%.*]] = icmp eq i32 [[TMP8]], 0
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 94c32e3cbe99f7..483ea8ad57d1b3 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -2714,7 +2714,7 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
 
 define amdgpu_kernel void @readfirstlane_constant(i32 %arg) {
 ; CHECK-LABEL: @readfirstlane_constant(
-; CHECK-NEXT:    [[VAR:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[VAR:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    store volatile i32 [[VAR]], ptr undef, align 4
 ; CHECK-NEXT:    store volatile i32 0, ptr undef, align 4
 ; CHECK-NEXT:    store volatile i32 123, ptr undef, align 4
@@ -2737,7 +2737,7 @@ define amdgpu_kernel void @readfirstlane_constant(i32 %arg) {
 
 define i32 @readfirstlane_idempotent(i32 %arg) {
 ; CHECK-LABEL: @readfirstlane_idempotent(
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    ret i32 [[READ0]]
 ;
   %read0 = call i32 @llvm.amdgcn.readfirstlane(i32 %arg)
@@ -2748,7 +2748,7 @@ define i32 @readfirstlane_idempotent(i32 %arg) {
 
 define i32 @readfirstlane_readlane(i32 %arg) {
 ; CHECK-LABEL: @readfirstlane_readlane(
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    ret i32 [[READ0]]
 ;
   %read0 = call i32 @llvm.amdgcn.readfirstlane(i32 %arg)
@@ -2759,10 +2759,10 @@ define i32 @readfirstlane_readlane(i32 %arg) {
 define i32 @readfirstlane_readfirstlane_different_block(i32 %arg) {
 ; CHECK-LABEL: @readfirstlane_readfirstlane_different_block(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[READ0]])
+; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[READ0]])
 ; CHECK-NEXT:    ret i32 [[READ1]]
 ;
 bb0:
@@ -2777,10 +2777,10 @@ bb1:
 define i32 @readfirstlane_readlane_different_block(i32 %arg) {
 ; CHECK-LABEL: @readfirstlane_readlane_different_block(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[ARG:%.*]], i32 0)
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 0)
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[READ0]])
+; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[READ0]])
 ; CHECK-NEXT:    ret i32 [[READ1]]
 ;
 bb0:
@@ -2800,7 +2800,7 @@ declare i32 @llvm.amdgcn.readlane(i32, i32)
 
 define amdgpu_kernel void @readlane_constant(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: @readlane_constant(
-; CHECK-NEXT:    [[VAR:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[ARG:%.*]], i32 7)
+; CHECK-NEXT:    [[VAR:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 7)
 ; CHECK-NEXT:    store volatile i32 [[VAR]], ptr undef, align 4
 ; CHECK-NEXT:    store volatile i32 0, ptr undef, align 4
 ; CHECK-NEXT:    store volatile i32 123, ptr undef, align 4
@@ -2823,7 +2823,7 @@ define amdgpu_kernel void @readlane_constant(i32 %arg, i32 %lane) {
 
 define i32 @readlane_idempotent(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: @readlane_idempotent(
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[ARG:%.*]], i32 [[LANE:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 [[LANE:%.*]])
 ; CHECK-NEXT:    ret i32 [[READ0]]
 ;
   %read0 = call i32 @llvm.amdgcn.readlane(i32 %arg, i32 %lane)
@@ -2833,8 +2833,8 @@ define i32 @readlane_idempotent(i32 %arg, i32 %lane) {
 
 define i32 @readlane_idempotent_different_lanes(i32 %arg, i32 %lane0, i32 %lane1) {
 ; CHECK-LABEL: @readlane_idempotent_different_lanes(
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[ARG:%.*]], i32 [[LANE0:%.*]])
-; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[READ0]], i32 [[LANE1:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 [[LANE0:%.*]])
+; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 [[LANE1:%.*]])
 ; CHECK-NEXT:    ret i32 [[READ1]]
 ;
   %read0 = call i32 @llvm.amdgcn.readlane(i32 %arg, i32 %lane0)
@@ -2844,7 +2844,7 @@ define i32 @readlane_idempotent_different_lanes(i32 %arg, i32 %lane0, i32 %lane1
 
 define i32 @readlane_readfirstlane(i32 %arg) {
 ; CHECK-LABEL: @readlane_readfirstlane(
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    ret i32 [[READ0]]
 ;
   %read0 = call i32 @llvm.amdgcn.readfirstlane(i32 %arg)
@@ -2855,10 +2855,10 @@ define i32 @readlane_readfirstlane(i32 %arg) {
 define i32 @readlane_idempotent_different_block(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: @readlane_idempotent_different_block(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[ARG:%.*]], i32 [[LANE:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 [[LANE:%.*]])
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[READ0]], i32 [[LANE]])
+; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 [[LANE]])
 ; CHECK-NEXT:    ret i32 [[READ1]]
 ;
 bb0:
@@ -2874,10 +2874,10 @@ bb1:
 define i32 @readlane_readfirstlane_different_block(i32 %arg) {
 ; CHECK-LABEL: @readlane_readfirstlane_different_block(
 ; CHECK-NEXT:  bb0:
-; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[ARG:%.*]])
+; CHECK-NEXT:    [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
 ; CHECK-NEXT:    br label [[BB1:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane(i32 [[READ0]], i32 0)
+; CHECK-NEXT:    [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 0)
 ; CHECK-NEXT:    ret i32 [[READ1]]
 ;
 bb0:

>From dfa321965462920f8e656695a425c7a7c24dcf42 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Mon, 15 Apr 2024 09:11:14 +0000
Subject: [PATCH 03/11] Fix issues with writelane expansion

---
 llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 3 ++-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp  | 3 ---
 llvm/lib/Target/AMDGPU/SIInstructions.td   | 4 +++-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 8b21c22b449710..d722b6fb56bccb 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -691,7 +691,8 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
 
         break;
       }
-      case AMDGPU::V_WRITELANE_B32: {
+      case AMDGPU::V_WRITELANE_B32:
+      case AMDGPU::V_WRITELANE_PSEUDO_B64: {
         // Some architectures allow more than one constant bus access without
         // SGPR restriction
         if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4ae17ccfe80674..e59e286b408a10 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5160,9 +5160,6 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
 
     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
-    const TargetRegisterClass *Src1RC = Src1.isReg()
-                                        ? MRI.getRegClass(Src1.getReg())
-                                        : &AMDGPU::SReg_32RegClass;
     const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());                                     
     
     const TargetRegisterClass *Src0SubRC =
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index decd59be5eb29d..6a3ae238a6e84b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -303,7 +303,9 @@ let usesCustomInserter = 1 in {
   def V_WRITELANE_PSEUDO_B64 : VPseudoInstSI <
   (outs VReg_64:$sdst), (ins SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2),
   [(set i64:$sdst, (int_amdgcn_writelane i64:$src0, i32:$src1, i64:$src2))]
-  >;
+  > {
+    let UseNamedOperandTable = 1;
+  }
 }
 
 let usesCustomInserter = 1, Defs = [SCC] in {

>From fcc0a1a3ab4a133871d8279eead5d3579585ebcc Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Thu, 18 Apr 2024 05:42:37 +0000
Subject: [PATCH 04/11] code refactor and add patterns for f64

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 227 +++++++++-------------
 llvm/lib/Target/AMDGPU/SIInstructions.td  |  35 +++-
 2 files changed, 117 insertions(+), 145 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e59e286b408a10..5d39f9e9b183d7 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4822,6 +4822,94 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   return RetBB;
 }
 
+static MachineBasicBlock* lowerPseudoLaneOp(MachineInstr &MI,
+                                            MachineBasicBlock *BB,
+                                            const GCNSubtarget &ST,
+                                            unsigned Opc) {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
+    const DebugLoc &DL = MI.getDebugLoc();
+    const SIInstrInfo *TII = ST.getInstrInfo();
+
+    MachineOperand &Dest = MI.getOperand(0);
+    MachineOperand &Src0 = MI.getOperand(1);
+
+    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+    const TargetRegisterClass *Src0SubRC =
+                              TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
+
+    Register DestSub0 = MRI.createVirtualRegister(
+      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass: &AMDGPU::SGPR_32RegClass);
+    Register DestSub1 = MRI.createVirtualRegister(
+      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass: &AMDGPU::SGPR_32RegClass);
+
+
+    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+
+    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+
+    MachineInstr *LoHalf, *HighHalf;
+    switch(Opc) {
+      case AMDGPU::V_READLANE_PSEUDO_B64: {
+        MachineOperand &Src1 = MI.getOperand(2);
+        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
+                              .add(SrcReg0Sub0)
+                              .add(Src1);
+        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
+                                .add(SrcReg0Sub1)
+                                .add(Src1);
+         break;
+        }
+      case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
+        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
+                               .add(SrcReg0Sub0);
+        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
+                                 .add(SrcReg0Sub1);
+        break;
+        }
+      case AMDGPU::V_WRITELANE_PSEUDO_B64: {
+         MachineOperand &Src1 = MI.getOperand(2);
+         MachineOperand &Src2 = MI.getOperand(3);
+
+         const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
+         const TargetRegisterClass *Src2SubRC =
+                                    TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+
+        MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
+              MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
+
+        MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
+              MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);
+
+        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
+                        .add(SrcReg0Sub0)
+                        .add(Src1)
+                        .add(SrcReg2Sub0);
+        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
+                          .add(SrcReg0Sub1)
+                          .add(Src1)
+                          .add(SrcReg2Sub1);
+        break;
+        }
+        default:
+          llvm_unreachable("should not occur");
+      }
+
+    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+          .addReg(DestSub0)
+          .addImm(AMDGPU::sub0)
+          .addReg(DestSub1)
+          .addImm(AMDGPU::sub1);
+
+    TII->legalizeOperands(*LoHalf);
+    TII->legalizeOperands(*HighHalf);
+
+    MI.eraseFromParent();
+    return BB;
+}
+
 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
   MachineInstr &MI, MachineBasicBlock *BB) const {
 
@@ -5065,141 +5153,10 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MI.eraseFromParent();
     return BB;
   }
-  case AMDGPU::V_READLANE_PSEUDO_B64: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-    const SIRegisterInfo *TRI = ST.getRegisterInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
-
-    MachineOperand &Dest = MI.getOperand(0);
-    MachineOperand &Src0 = MI.getOperand(1);
-    MachineOperand &Src1 = MI.getOperand(2);
-
-    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-
-    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
-    const TargetRegisterClass *Src1RC = Src1.isReg()
-                                        ? MRI.getRegClass(Src1.getReg())
-                                        : &AMDGPU::SReg_32RegClass;
-    
-    const TargetRegisterClass *Src0SubRC =
-        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
-    
-    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
-
-    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
-    
-    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
-                           .add(SrcReg0Sub0)
-                           .add(Src1);
-    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
-                           .add(SrcReg0Sub1)
-                           .add(Src1);
-
-    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
-          .addReg(DestSub0)
-          .addImm(AMDGPU::sub0)
-          .addReg(DestSub1)
-          .addImm(AMDGPU::sub1);
-
-    MI.eraseFromParent();
-    return BB;
-  }
-  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-    const SIRegisterInfo *TRI = ST.getRegisterInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
-
-    MachineOperand &Dest = MI.getOperand(0);
-    MachineOperand &Src0 = MI.getOperand(1);
-
-    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-
-    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
-
-    const TargetRegisterClass *Src0SubRC =
-        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
-    
-    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
-
-    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
-
-    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
-                           .add(SrcReg0Sub0);
-    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
-                           .add(SrcReg0Sub1);
-    
-    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
-          .addReg(DestSub0)
-          .addImm(AMDGPU::sub0)
-          .addReg(DestSub1)
-          .addImm(AMDGPU::sub1);
-
-    MI.eraseFromParent();
-    return BB;
-  }
-  case AMDGPU::V_WRITELANE_PSEUDO_B64: {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-    const SIRegisterInfo *TRI = ST.getRegisterInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
-
-    MachineOperand &Dest = MI.getOperand(0);
-    MachineOperand &Src0 = MI.getOperand(1);
-    MachineOperand &Src1 = MI.getOperand(2);
-    MachineOperand &Src2 = MI.getOperand(3);
-
-    Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-    Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
-
-    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
-    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());                                     
-    
-    const TargetRegisterClass *Src0SubRC =
-        TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
-    
-    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
-
-    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
-
-    
-    const TargetRegisterClass *Src2SubRC =
-        TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-    
-    MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
-
-    MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);        
-    
-    
-    MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
-                           .add(SrcReg0Sub0)
-                           .add(Src1)
-                           .add(SrcReg2Sub0);
-    MachineInstr *HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
-                           .add(SrcReg0Sub1)
-                           .add(Src1)
-                           .add(SrcReg2Sub1);
-    
-    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
-          .addReg(DestSub0)
-          .addImm(AMDGPU::sub0)
-          .addReg(DestSub1)
-          .addImm(AMDGPU::sub1);
-
-    MI.eraseFromParent();
-    return BB;   
-  }
+  case AMDGPU::V_READLANE_PSEUDO_B64:
+  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64:
+  case AMDGPU::V_WRITELANE_PSEUDO_B64:
+      return lowerPseudoLaneOp(MI, BB, *getSubtarget(), MI.getOpcode());
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 6a3ae238a6e84b..e8ece71fe07b7b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -291,22 +291,37 @@ def V_SUB_U64_PSEUDO : VPseudoInstSI <
 
 let usesCustomInserter = 1 in {
   def V_READLANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs SReg_64:$sdst), (ins VReg_64:$src0, SSrc_b32:$src1),
-  [(set i64:$sdst, (int_amdgcn_readlane i64:$src0, i32:$src1))]
-  >;
+  (outs SReg_64:$sdst), (ins VReg_64:$src0, SSrc_b32:$src1)>;
 
   def V_READFIRSTLANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs SReg_64:$sdst), (ins VReg_64:$src0),
-  [(set i64:$sdst, (int_amdgcn_readfirstlane i64:$src0))]
-  >;
+  (outs SReg_64:$sdst), (ins VReg_64:$src0)>;
   
   def V_WRITELANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs VReg_64:$sdst), (ins SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2),
-  [(set i64:$sdst, (int_amdgcn_writelane i64:$src0, i32:$src1, i64:$src2))]
-  > {
+  (outs VReg_64:$sdst), (ins SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2)> {
     let UseNamedOperandTable = 1;
   }
-}
+} // End usesCustomInserter = 1
+
+class ReadLanePseudoPat <ValueType vt> : GCNPat <
+  (vt (int_amdgcn_readlane vt:$src0, i32:$src1)),
+  (V_READLANE_PSEUDO_B64 VReg_64:$src0, SSrc_b32:$src1)>;
+
+def : ReadLanePseudoPat<i64>;
+def : ReadLanePseudoPat<f64>;
+
+class WriteLanePseudoPat <ValueType vt> : GCNPat <
+  (vt (int_amdgcn_writelane vt:$src0, i32:$src1, vt:$src2)),
+  (V_WRITELANE_PSEUDO_B64 SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2)>;
+
+def : WriteLanePseudoPat<i64>;
+def : WriteLanePseudoPat<f64>;
+
+class ReadFirstLanePseudoPat <ValueType vt> : GCNPat <
+  (vt (int_amdgcn_readfirstlane vt:$src0)),
+  (V_READFIRSTLANE_PSEUDO_B64 VReg_64:$src0)>;
+
+def : ReadFirstLanePseudoPat<i64>;
+def : ReadFirstLanePseudoPat<f64>;
 
 let usesCustomInserter = 1, Defs = [SCC] in {
 def S_ADD_U64_PSEUDO : SPseudoInstSI <

>From 4e71a0649b775f40962cea4050c3a50f3ee1fa2e Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Thu, 18 Apr 2024 06:06:08 +0000
Subject: [PATCH 05/11] Apply clang-format to AMDGPUAtomicOptimizer and SIISelLowering changes

---
 .../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp   |  28 ++--
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 151 +++++++++---------
 2 files changed, 91 insertions(+), 88 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 5b3fa148e56192..0ec77d66e596da 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -493,8 +493,8 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
     if (!ST->isWave32()) {
       // Combine lane 31 into lanes 32..63.
       V = B.CreateBitCast(V, IntNTy);
-      Value *const Lane31 = B.CreateIntrinsic(Intrinsic::amdgcn_readlane, B.getInt32Ty(),
-                                              {V, B.getInt32(31)});
+      Value *const Lane31 = B.CreateIntrinsic(
+          Intrinsic::amdgcn_readlane, B.getInt32Ty(), {V, B.getInt32(31)});
 
       Value *UpdateDPPCall = B.CreateCall(
           UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
@@ -523,10 +523,10 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                      {Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
                       B.getInt32(0xf), B.getFalse()});
   } else {
-    Function *ReadLane =
-        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
-    Function *WriteLane =
-        Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, B.getInt32Ty());
+    Function *ReadLane = Intrinsic::getDeclaration(
+        M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
+    Function *WriteLane = Intrinsic::getDeclaration(
+        M, Intrinsic::amdgcn_writelane, B.getInt32Ty());
 
     // On GFX10 all DPP operations are confined to a single row. To get cross-
     // row operations we have to use permlane or readlane.
@@ -598,8 +598,8 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
 
   // Get the value required for atomic operation
   V = B.CreateBitCast(V, IntNTy);
-  Value *LaneValue =
-      B.CreateIntrinsic(Intrinsic::amdgcn_readlane, B.getInt32Ty(), {V, LaneIdxInt});
+  Value *LaneValue = B.CreateIntrinsic(Intrinsic::amdgcn_readlane,
+                                       B.getInt32Ty(), {V, LaneIdxInt});
   LaneValue = B.CreateBitCast(LaneValue, Ty);
 
   // Perform writelane if intermediate scan results are required later in the
@@ -925,10 +925,10 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
       Value *const ExtractLo = B.CreateTrunc(CastedPhi, Int32Ty);
       Value *const ExtractHi =
           B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
-      CallInst *const ReadFirstLaneLo =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractLo);
-      CallInst *const ReadFirstLaneHi =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractHi);
+      CallInst *const ReadFirstLaneLo = B.CreateIntrinsic(
+          Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractLo);
+      CallInst *const ReadFirstLaneHi = B.CreateIntrinsic(
+          Intrinsic::amdgcn_readfirstlane, Int32Ty, ExtractHi);
       Value *const PartialInsert = B.CreateInsertElement(
           PoisonValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
       Value *const Insert =
@@ -936,8 +936,8 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
       BroadcastI = B.CreateBitCast(Insert, Ty);
     } else if (TyBitWidth == 32) {
       Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
-      BroadcastI =
-          B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty, CastedPhi);
+      BroadcastI = B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, Int32Ty,
+                                     CastedPhi);
       BroadcastI = B.CreateBitCast(BroadcastI, Ty);
 
     } else {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5d39f9e9b183d7..b070ecafd950f2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4822,92 +4822,95 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   return RetBB;
 }
 
-static MachineBasicBlock* lowerPseudoLaneOp(MachineInstr &MI,
+static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
                                             MachineBasicBlock *BB,
                                             const GCNSubtarget &ST,
                                             unsigned Opc) {
-    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-    const SIRegisterInfo *TRI = ST.getRegisterInfo();
-    const DebugLoc &DL = MI.getDebugLoc();
-    const SIInstrInfo *TII = ST.getInstrInfo();
+  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const DebugLoc &DL = MI.getDebugLoc();
+  const SIInstrInfo *TII = ST.getInstrInfo();
 
-    MachineOperand &Dest = MI.getOperand(0);
-    MachineOperand &Src0 = MI.getOperand(1);
+  MachineOperand &Dest = MI.getOperand(0);
+  MachineOperand &Src0 = MI.getOperand(1);
 
-    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
-    const TargetRegisterClass *Src0SubRC =
-                              TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
+  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+  const TargetRegisterClass *Src0SubRC =
+      TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
 
-    Register DestSub0 = MRI.createVirtualRegister(
-      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass: &AMDGPU::SGPR_32RegClass);
-    Register DestSub1 = MRI.createVirtualRegister(
-      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass: &AMDGPU::SGPR_32RegClass);
+  Register DestSub0 = MRI.createVirtualRegister(
+      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass
+                                              : &AMDGPU::SGPR_32RegClass);
+  Register DestSub1 = MRI.createVirtualRegister(
+      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass
+                                              : &AMDGPU::SGPR_32RegClass);
 
+  MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
+      MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
 
-    MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+  MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
+      MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
 
-    MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+  MachineInstr *LoHalf, *HighHalf;
+  switch (Opc) {
+  case AMDGPU::V_READLANE_PSEUDO_B64: {
+    MachineOperand &Src1 = MI.getOperand(2);
+    LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
+                 .add(SrcReg0Sub0)
+                 .add(Src1);
+    HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
+                   .add(SrcReg0Sub1)
+                   .add(Src1);
+    break;
+  }
+  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
+    LoHalf =
+        BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
+            .add(SrcReg0Sub0);
+    HighHalf =
+        BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
+            .add(SrcReg0Sub1);
+    break;
+  }
+  case AMDGPU::V_WRITELANE_PSEUDO_B64: {
+    MachineOperand &Src1 = MI.getOperand(2);
+    MachineOperand &Src2 = MI.getOperand(3);
 
-    MachineInstr *LoHalf, *HighHalf;
-    switch(Opc) {
-      case AMDGPU::V_READLANE_PSEUDO_B64: {
-        MachineOperand &Src1 = MI.getOperand(2);
-        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
-                              .add(SrcReg0Sub0)
-                              .add(Src1);
-        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
-                                .add(SrcReg0Sub1)
-                                .add(Src1);
-         break;
-        }
-      case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
-        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
-                               .add(SrcReg0Sub0);
-        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
-                                 .add(SrcReg0Sub1);
-        break;
-        }
-      case AMDGPU::V_WRITELANE_PSEUDO_B64: {
-         MachineOperand &Src1 = MI.getOperand(2);
-         MachineOperand &Src2 = MI.getOperand(3);
-
-         const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
-         const TargetRegisterClass *Src2SubRC =
-                                    TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-
-        MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
-              MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
-
-        MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
-              MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);
-
-        LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
-                        .add(SrcReg0Sub0)
-                        .add(Src1)
-                        .add(SrcReg2Sub0);
-        HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
-                          .add(SrcReg0Sub1)
-                          .add(Src1)
-                          .add(SrcReg2Sub1);
-        break;
-        }
-        default:
-          llvm_unreachable("should not occur");
-      }
+    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
+    const TargetRegisterClass *Src2SubRC =
+        TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+
+    MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
+
+    MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
+        MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);
+
+    LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
+                 .add(SrcReg0Sub0)
+                 .add(Src1)
+                 .add(SrcReg2Sub0);
+    HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
+                   .add(SrcReg0Sub1)
+                   .add(Src1)
+                   .add(SrcReg2Sub1);
+    break;
+  }
+  default:
+    llvm_unreachable("should not occur");
+  }
 
-    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
-          .addReg(DestSub0)
-          .addImm(AMDGPU::sub0)
-          .addReg(DestSub1)
-          .addImm(AMDGPU::sub1);
+  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
+      .addReg(DestSub0)
+      .addImm(AMDGPU::sub0)
+      .addReg(DestSub1)
+      .addImm(AMDGPU::sub1);
 
-    TII->legalizeOperands(*LoHalf);
-    TII->legalizeOperands(*HighHalf);
+  TII->legalizeOperands(*LoHalf);
+  TII->legalizeOperands(*HighHalf);
 
-    MI.eraseFromParent();
-    return BB;
+  MI.eraseFromParent();
+  return BB;
 }
 
 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
@@ -5156,7 +5159,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
   case AMDGPU::V_READLANE_PSEUDO_B64:
   case AMDGPU::V_READFIRSTLANE_PSEUDO_B64:
   case AMDGPU::V_WRITELANE_PSEUDO_B64:
-      return lowerPseudoLaneOp(MI, BB, *getSubtarget(), MI.getOpcode());
+    return lowerPseudoLaneOp(MI, BB, *getSubtarget(), MI.getOpcode());
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)

>From c7ff0e53511f485cad9c2370a54e68c77ef2a957 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Thu, 18 Apr 2024 11:26:08 +0000
Subject: [PATCH 06/11] fix corner case with regkill and add readlane tests

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  12 ++
 .../CodeGen/AMDGPU/llvm.amdgcn.readlane.ll    | 187 ++++++++++++++++--
 2 files changed, 180 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b070ecafd950f2..79a9f451589b16 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4855,9 +4855,15 @@ static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
   switch (Opc) {
   case AMDGPU::V_READLANE_PSEUDO_B64: {
     MachineOperand &Src1 = MI.getOperand(2);
+    auto IsKill = (Src1.isReg() && Src1.isKill());
+    if (IsKill)
+      Src1.setIsKill(false);
     LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
                  .add(SrcReg0Sub0)
                  .add(Src1);
+
+    if (IsKill)
+      Src1.setIsKill(true);
     HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
                    .add(SrcReg0Sub1)
                    .add(Src1);
@@ -4875,6 +4881,7 @@ static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
   case AMDGPU::V_WRITELANE_PSEUDO_B64: {
     MachineOperand &Src1 = MI.getOperand(2);
     MachineOperand &Src2 = MI.getOperand(3);
+    auto IsKill = (Src1.isReg() && Src1.isKill());
 
     const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
     const TargetRegisterClass *Src2SubRC =
@@ -4886,10 +4893,15 @@ static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
     MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
         MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);
 
+    if (IsKill)
+      Src1.setIsKill(false);
     LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
                  .add(SrcReg0Sub0)
                  .add(Src1)
                  .add(SrcReg2Sub0);
+
+    if (IsKill)
+      Src1.setIsKill(true);
     HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
                    .add(SrcReg0Sub1)
                    .add(Src1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 51465f6bd10ce5..49bb8ca262e85b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -1,46 +1,141 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope %s
 
-declare i32 @llvm.amdgcn.readlane(i32, i32) #0
+declare i32 @llvm.amdgcn.readlane.i32(i32, i32) #0
+declare i64 @llvm.amdgcn.readlane.i64(i64, i32) #0
+declare double @llvm.amdgcn.readlane.f64(double, i32) #0
 
-; CHECK-LABEL: {{^}}test_readlane_sreg_sreg:
+; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_i32:
 ; CHECK-NOT: v_readlane_b32
-define amdgpu_kernel void @test_readlane_sreg_sreg(i32 %src0, i32 %src1) #1 {
-  %readlane = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1 {
+  %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %src0, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i32 %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vreg_sreg:
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_i64:
+; CHECK: v_mov_b32_e32 [[VREG0:v[0-9]+]], {{s[0-9]+}}
+; CHECK: v_mov_b32_e32 [[VREG1:v[0-9]+]], {{s[0-9]+}}
+; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG0]], {{s[0-9]+}}
+; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG1]], {{s[0-9]+}}
+define amdgpu_kernel void @test_readlane_sreg_sreg_i64(i64 %src0, i32 %src1) #1 {
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(i64 %readlane)
+  ret void
+}
+
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_f64:
+; CHECK: v_mov_b32_e32 [[VREG0:v[0-9]+]], {{s[0-9]+}}
+; CHECK: v_mov_b32_e32 [[VREG1:v[0-9]+]], {{s[0-9]+}}
+; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG0]], {{s[0-9]+}}
+; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG1]], {{s[0-9]+}}
+define amdgpu_kernel void @test_readlane_sreg_sreg_f64(double %src0, i32 %src1) #1 {
+  %readlane = call double @llvm.amdgcn.readlane.f64(double %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(double %readlane)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_i32:
 ; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-define amdgpu_kernel void @test_readlane_vreg_sreg(i32 %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_readlane_vreg_sreg_i32(i32 %src0, i32 %src1) #1 {
   %vgpr = call i32 asm sideeffect "; def $0", "=v"()
-  %readlane = call i32 @llvm.amdgcn.readlane(i32 %vgpr, i32 %src1)
+  %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %vgpr, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i32 %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_imm_sreg:
+; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_i64:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_readlane_vreg_sreg_i64(i64 %src0, i32 %src1) #1 {
+  %vgpr = call i64 asm sideeffect "; def $0", "=v"()
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %vgpr, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(i64 %readlane)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_f64:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_readlane_vreg_sreg_f64(double %src0, i32 %src1) #1 {
+  %vgpr = call double asm sideeffect "; def $0", "=v"()
+  %readlane = call double @llvm.amdgcn.readlane.f64(double %vgpr, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(double %readlane)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_imm_sreg_i32:
 ; CHECK-NOT: v_readlane_b32
-define amdgpu_kernel void @test_readlane_imm_sreg(ptr addrspace(1) %out, i32 %src1) #1 {
-  %readlane = call i32 @llvm.amdgcn.readlane(i32 32, i32 %src1)
+define amdgpu_kernel void @test_readlane_imm_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
+  %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 32, i32 %src1)
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vregs:
+; CHECK-LABEL: {{^}}test_readlane_imm_sreg_i64:
+; CHECK-NOT: v_readlane_b32
+define amdgpu_kernel void @test_readlane_imm_sreg_i64(ptr addrspace(1) %out, i32 %src1) #1 {
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 32, i32 %src1)
+  store i64 %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_imm_sreg_f64:
+; CHECK-NOT: v_readlane_b32
+define amdgpu_kernel void @test_readlane_imm_sreg_f64(ptr addrspace(1) %out, i32 %src1) #1 {
+  %readlane = call double @llvm.amdgcn.readlane.f64(double 32.0, i32 %src1)
+  store double %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_vregs_i32:
 ; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
 ; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
-define amdgpu_kernel void @test_readlane_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @test_readlane_vregs_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i32>, ptr addrspace(1) %gep.in
   %value = extractelement <2 x i32> %args, i32 0
   %lane = extractelement <2 x i32> %args, i32 1
-  %readlane = call i32 @llvm.amdgcn.readlane(i32 %value, i32 %lane)
+  %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %value, i32 %lane)
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
+; CHECK-LABEL: {{^}}test_readlane_vregs_i64:
+; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
+define amdgpu_kernel void @test_readlane_vregs_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep.in = getelementptr <2 x i64>, ptr addrspace(1) %in, i32 %tid
+  %args = load <2 x i64>, ptr addrspace(1) %gep.in
+  %value = extractelement <2 x i64> %args, i32 0
+  %lane = extractelement <2 x i64> %args, i32 1
+  %lane32 = trunc i64 %lane to i32
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %value, i32 %lane32)
+  store i64 %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_vregs_f64:
+; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
+define amdgpu_kernel void @test_readlane_vregs_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep.in = getelementptr <2 x double>, ptr addrspace(1) %in, i32 %tid
+  %args = load <2 x double>, ptr addrspace(1) %gep.in
+  %value = extractelement <2 x double> %args, i32 0
+  %lane = extractelement <2 x double> %args, i32 1
+  %lane_cast = bitcast double %lane to i64
+  %lane32 = trunc i64 %lane_cast to i32
+  %readlane = call double @llvm.amdgcn.readlane.f64(double %value, i32 %lane32)
+  store double %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 ; TODO: m0 should be folded.
 ; CHECK-LABEL: {{^}}test_readlane_m0_sreg:
 ; CHECK: s_mov_b32 m0, -1
@@ -53,16 +148,36 @@ define amdgpu_kernel void @test_readlane_m0_sreg(ptr addrspace(1) %out, i32 %src
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vgpr_imm:
+; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_i32:
 ; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
-define amdgpu_kernel void @test_readlane_vgpr_imm(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readlane_vgpr_imm_i32(ptr addrspace(1) %out) #1 {
   %vgpr = call i32 asm sideeffect "; def $0", "=v"()
-  %readlane = call i32 @llvm.amdgcn.readlane(i32 %vgpr, i32 32) #0
+  %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %vgpr, i32 32) #0
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr:
+; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_i64:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
+define amdgpu_kernel void @test_readlane_vgpr_imm_i64(ptr addrspace(1) %out) #1 {
+  %vgpr = call i64 asm sideeffect "; def $0", "=v"()
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %vgpr, i32 32) #0
+  store i64 %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_f64:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
+define amdgpu_kernel void @test_readlane_vgpr_imm_f64(ptr addrspace(1) %out) #1 {
+  %vgpr = call double asm sideeffect "; def $0", "=v"()
+  %readlane = call double @llvm.amdgcn.readlane.f64(double %vgpr, i32 32) #0
+  store double %readlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_i32:
 ; CHECK: ;;#ASMSTART
 ; CHECK-NEXT: s_mov_b32 [[SGPR:s[0-9]+]]
 ; CHECK: ;;#ASMEND
@@ -70,13 +185,47 @@ define amdgpu_kernel void @test_readlane_vgpr_imm(ptr addrspace(1) %out) #1 {
 ; CHECK-NOT: readlane
 ; CHECK: v_mov_b32_e32 [[VCOPY:v[0-9]+]], [[SGPR]]
 ; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VCOPY]]
-define amdgpu_kernel void @test_readlane_copy_from_sgpr(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readlane_copy_from_sgpr_i32(ptr addrspace(1) %out) #1 {
   %sgpr = call i32 asm "s_mov_b32 $0, 0", "=s"()
-  %readfirstlane = call i32 @llvm.amdgcn.readlane(i32 %sgpr, i32 7)
+  %readfirstlane = call i32 @llvm.amdgcn.readlane.i32(i32 %sgpr, i32 7)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_i64:
+; CHECK: ;;#ASMSTART
+; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
+; CHECK: ;;#ASMEND
+; CHECK: v_readlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}, 7
+; CHECK: v_readlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}, 7
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
+; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_readlane_copy_from_sgpr_i64(ptr addrspace(1) %out) #1 {
+  %sgpr = call i64 asm "s_mov_b64 $0, 0", "=s"()
+  %readfirstlane = call i64 @llvm.amdgcn.readlane.i64(i64 %sgpr, i32 7)
+  store i64 %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_f64:
+; CHECK: ;;#ASMSTART
+; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
+; CHECK: ;;#ASMEND
+; CHECK: v_readlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}, 7
+; CHECK: v_readlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}, 7
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
+; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %out) #1 {
+  %sgpr = call double asm "s_mov_b64 $0, 0", "=s"()
+  %readfirstlane = call double @llvm.amdgcn.readlane.f64(double %sgpr, i32 7)
+  store double %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #2
 
 attributes #0 = { nounwind readnone convergent }

>From d6a8ce4c5f54b19cef43ffb470698689ab6dfee6 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Fri, 19 Apr 2024 07:13:44 +0000
Subject: [PATCH 07/11] update builtin handling for readlane and readfirstlane

---
 clang/lib/CodeGen/CGBuiltin.cpp             | 18 ++++++++++++++++++
 clang/test/CodeGenOpenCL/builtins-amdgcn.cl |  4 ++--
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td    |  3 ---
 3 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a05874e63c73c2..980d8c0887c4e4 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18410,6 +18410,24 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
         CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
     return Builder.CreateCall(F, Args);
   }
+  case AMDGPU::BI__builtin_amdgcn_readlane:
+  case AMDGPU::BI__builtin_amdgcn_readfirstlane: {
+    llvm::SmallVector<llvm::Value *, 6> Args;
+    unsigned ICEArguments = 0;
+    ASTContext::GetBuiltinTypeError Error;
+    Intrinsic::ID IID = (BuiltinID == AMDGPU::BI__builtin_amdgcn_readlane)
+                            ? Intrinsic::amdgcn_readlane
+                            : Intrinsic::amdgcn_readfirstlane;
+
+    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+    assert(Error == ASTContext::GE_None && "Should not codegen an error");
+    for (unsigned I = 0; I != E->getNumArgs(); ++I) {
+      Args.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, I, E));
+    }
+
+    Function *F = CGM.getIntrinsic(IID, Args[0]->getType());
+    return Builder.CreateCall(F, Args);
+  }
   case AMDGPU::BI__builtin_amdgcn_div_fixup:
   case AMDGPU::BI__builtin_amdgcn_div_fixupf:
   case AMDGPU::BI__builtin_amdgcn_div_fixuph:
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
index bdca97c8878670..f93d5ac29a2cc9 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -306,14 +306,14 @@ void test_ds_bpermute(global int* out, int a, int b)
 }
 
 // CHECK-LABEL: @test_readfirstlane
-// CHECK: call i32 @llvm.amdgcn.readfirstlane(i32 %a)
+// CHECK: call i32 @llvm.amdgcn.readfirstlane.i32(i32 %a)
 void test_readfirstlane(global int* out, int a)
 {
   *out = __builtin_amdgcn_readfirstlane(a);
 }
 
 // CHECK-LABEL: @test_readlane
-// CHECK: call i32 @llvm.amdgcn.readlane(i32 %a, i32 %b)
+// CHECK: call i32 @llvm.amdgcn.readlane.i32(i32 %a, i32 %b)
 void test_readlane(global int* out, int a, int b)
 {
   *out = __builtin_amdgcn_readlane(a, b);
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 34feee1c56be82..2afdfa1b1273d9 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2176,14 +2176,12 @@ def int_amdgcn_wave_reduce_umin : AMDGPUWaveReduce;
 def int_amdgcn_wave_reduce_umax : AMDGPUWaveReduce;
 
 def int_amdgcn_readfirstlane :
-  ClangBuiltin<"__builtin_amdgcn_readfirstlane">,
   Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
 
 // The lane argument must be uniform across the currently active threads of the
 // current wave. Otherwise, the result is undefined.
 def int_amdgcn_readlane :
-  ClangBuiltin<"__builtin_amdgcn_readlane">,
   Intrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty],
             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
 
@@ -2191,7 +2189,6 @@ def int_amdgcn_readlane :
 // currently active threads of the current wave. Otherwise, the result is
 // undefined.
 def int_amdgcn_writelane :
-  ClangBuiltin<"__builtin_amdgcn_writelane">,
   Intrinsic<[llvm_any_ty], [
     LLVMMatchType<0>,    // uniform value to write: returned by the selected lane
     llvm_i32_ty,        // uniform lane select

>From 15cbd9052fa974bb0cce0a58bb0b14a2a99164d1 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Fri, 19 Apr 2024 19:04:23 +0000
Subject: [PATCH 08/11] add and update tests, fixes to writelane src0 imm
 handling

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   4 +-
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       | 119 +++++++++--
 .../CodeGen/AMDGPU/llvm.amdgcn.writelane.ll   | 199 ++++++++++++++++--
 3 files changed, 288 insertions(+), 34 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 79a9f451589b16..ae928a6813a842 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4834,7 +4834,8 @@ static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
   MachineOperand &Dest = MI.getOperand(0);
   MachineOperand &Src0 = MI.getOperand(1);
 
-  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+  const TargetRegisterClass *Src0RC =
+      Src0.isReg() ? MRI.getRegClass(Src0.getReg()) : &AMDGPU::SReg_64RegClass;
   const TargetRegisterClass *Src0SubRC =
       TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
 
@@ -4895,6 +4896,7 @@ static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
 
     if (IsKill)
       Src1.setIsKill(false);
+
     LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
                  .add(SrcReg0Sub0)
                  .add(Src1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index 0284f44f5f14d4..c5e0e9ffd3a9b4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -1,35 +1,96 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope %s
 
 declare i32 @llvm.amdgcn.readfirstlane(i32) #0
+declare i64 @llvm.amdgcn.readfirstlane.i64(i64) #0
+declare double @llvm.amdgcn.readfirstlane.f64(double) #0
 
-; CHECK-LABEL: {{^}}test_readfirstlane:
+; CHECK-LABEL: {{^}}test_readfirstlane_i32:
 ; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v2
-define void @test_readfirstlane(ptr addrspace(1) %out, i32 %src) #1 {
-  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %src)
+define void @test_readfirstlane_i32(ptr addrspace(1) %out, i32 %src) #1 {
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %src)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm:
+; CHECK-LABEL: {{^}}test_readfirstlane_i64:
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
+define void @test_readfirstlane_i64(ptr addrspace(1) %out, i64 %src) #1 {
+  %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %src)
+  store i64 %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readfirstlane_f64:
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
+define void @test_readfirstlane_f64(ptr addrspace(1) %out, double %src) #1 {
+  %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double %src)
+  store double %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_i32:
 ; CHECK: s_mov_b32 [[SGPR_VAL:s[0-9]]], 32
 ; CHECK-NOT: [[SGPR_VAL]]
 ; CHECK: ; use [[SGPR_VAL]]
-define amdgpu_kernel void @test_readfirstlane_imm(ptr addrspace(1) %out) #1 {
-  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 32)
+define amdgpu_kernel void @test_readfirstlane_imm_i32(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 32)
   call void asm sideeffect "; use $0", "s"(i32 %readfirstlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold:
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_i64:
+; CHECK: s_mov_b64 [[SGPR_VAL:s\[[0-9]+:[0-9]+\]]], 32
+; CHECK: use [[SGPR_VAL]]
+define amdgpu_kernel void @test_readfirstlane_imm_i64(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 32)
+  call void asm sideeffect "; use $0", "s"(i64 %readfirstlane)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_f64:
+; CHECK: s_mov_b32 s[[VAL0:[0-9]+]], 0
+; CHECK: s_mov_b32 s[[VAL1:[0-9]+]], 0x40400000
+; CHECK: ; use s[[[VAL0]]:[[VAL1]]]
+define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double 32.0)
+  call void asm sideeffect "; use $0", "s"(double %readfirstlane)
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold_i32:
 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
 ; CHECK-NOT: [[VVAL]]
 ; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]]
-define amdgpu_kernel void @test_readfirstlane_imm_fold(ptr addrspace(1) %out) #1 {
-  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 32)
+define amdgpu_kernel void @test_readfirstlane_imm_fold_i32(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 32)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold_i64:
+; CHECK: s_mov_b64 s[[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]], 32
+; CHECK: v_mov_b32_e32 v[[RES0:[0-9]+]], s[[VAL0]]
+; CHECK: v_mov_b32_e32 v[[RES1:[0-9]+]], s[[VAL1]]
+; CHECK: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RES0]]:[[RES1]]]
+define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 32)
+  store i64 %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold_f64:
+; CHECK: s_mov_b32 s[[VAL0:[0-9]+]], 0
+; CHECK: s_mov_b32 s[[VAL1:[0-9]+]], 0x40400000
+; CHECK: v_mov_b32_e32 v[[RES0:[0-9]+]], s[[VAL0]]
+; CHECK: v_mov_b32_e32 v[[RES1:[0-9]+]], s[[VAL1]]
+; CHECK: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RES0]]:[[RES1]]]
+define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out) #1 {
+  %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double 32.0)
+  store double %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 ; CHECK-LABEL: {{^}}test_readfirstlane_m0:
 ; CHECK: s_mov_b32 m0, -1
 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
@@ -41,7 +102,7 @@ define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) #1 {
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr:
+; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_i32:
 ; CHECK: ;;#ASMSTART
 ; CHECK-NEXT: s_mov_b32 [[SGPR:s[0-9]+]]
 ; CHECK: ;;#ASMEND
@@ -49,13 +110,47 @@ define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) #1 {
 ; CHECK-NOT: readfirstlane
 ; CHECK: v_mov_b32_e32 [[VCOPY:v[0-9]+]], [[SGPR]]
 ; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VCOPY]]
-define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i32(ptr addrspace(1) %out) #1 {
   %sgpr = call i32 asm "s_mov_b32 $0, 0", "=s"()
-  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %sgpr)
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %sgpr)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_i64:
+; CHECK: ;;#ASMSTART
+; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
+; CHECK: ;;#ASMEND
+; CHECK: v_readfirstlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}
+; CHECK: v_readfirstlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
+; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1) %out) #1 {
+  %sgpr = call i64 asm "s_mov_b64 $0, 0", "=s"()
+  %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %sgpr)
+  store i64 %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; TODO: should optimize this as for i32
+; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_f64:
+; CHECK: ;;#ASMSTART
+; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
+; CHECK: ;;#ASMEND
+; CHECK: v_readfirstlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}
+; CHECK: v_readfirstlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
+; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
+; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
+define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1) %out) #1 {
+  %sgpr = call double asm "s_mov_b64 $0, 0", "=s"()
+  %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double %sgpr)
+  store double %readfirstlane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 ; Make sure this doesn't crash.
 ; CHECK-LABEL: {{^}}test_readfirstlane_fi:
 ; CHECK: s_mov_b32 [[FIVAL:s[0-9]]], 0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index 37951669dbe755..9e7f5eb001a21c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -4,82 +4,239 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=CHECK,GFX10 %s
 
 declare i32 @llvm.amdgcn.writelane(i32, i32, i32) #0
+declare i64 @llvm.amdgcn.writelane.i64(i64, i32, i64) #0
+declare double @llvm.amdgcn.writelane.f64(double, i32, double) #0
 
-; CHECK-LABEL: {{^}}test_writelane_sreg:
+; CHECK-LABEL: {{^}}test_writelane_sreg_i32:
 ; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
 ; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-define amdgpu_kernel void @test_writelane_sreg(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_sreg_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
   %oldval = load i32, ptr addrspace(1) %out
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 %src0, i32 %src1, i32 %oldval)
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_sreg:
+; CHECK-LABEL: {{^}}test_writelane_sreg_i64:
+; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_sreg_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+  %oldval = load i64, ptr addrspace(1) %out
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 %oldval)
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_sreg_f64:
+; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_sreg_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+  %oldval = load double, ptr addrspace(1) %out
+  %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double %oldval)
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_imm_sreg_i32:
 ; CHECK: v_writelane_b32 v{{[0-9]+}}, 32, s{{[0-9]+}}
-define amdgpu_kernel void @test_writelane_imm_sreg(ptr addrspace(1) %out, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_imm_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
   %oldval = load i32, ptr addrspace(1) %out
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 32, i32 %src1, i32 %oldval)
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 32, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_vreg_lane:
+; CHECK-LABEL: {{^}}test_writelane_imm_sreg_i64:
+; CHECK: v_writelane_b32 v{{[0-9]+}}, 32, s{{[0-9]+}}
+; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, 0, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_imm_sreg_i64(ptr addrspace(1) %out, i32 %src1) #1 {
+  %oldval = load i64, ptr addrspace(1) %out
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 32, i32 %src1, i64 %oldval)
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; TODO: fold both SGPR's
+; CHECK-LABEL: {{^}}test_writelane_imm_sreg_f64:
+; CHECK: s_mov_b32 [[SGPR:s[0-9]+]], 0x40400000
+; CIGFX9: v_writelane_b32 v{{[0-9]+}}, 0, m0
+; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], m0
+; GFX10: v_writelane_b32 v{{[0-9]+}}, 0, s{{[0-9]+}}
+; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_imm_sreg_f64(ptr addrspace(1) %out, i32 %src1) #1 {
+  %oldval = load double, ptr addrspace(1) %out
+  %writelane = call double @llvm.amdgcn.writelane.f64(double 32.0, i32 %src1, double %oldval)
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_vreg_lane_i32:
 ; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
 ; CHECK: v_writelane_b32 v{{[0-9]+}}, 12, [[LANE]]
-define amdgpu_kernel void @test_writelane_vreg_lane(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i32>, ptr addrspace(1) %gep.in
   %oldval = load i32, ptr addrspace(1) %out
   %lane = extractelement <2 x i32> %args, i32 1
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 12, i32 %lane, i32 %oldval)
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 12, i32 %lane, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_m0_sreg:
+; CHECK-LABEL: {{^}}test_writelane_vreg_lane_i64:
+; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
+; CHECK: v_writelane_b32 v{{[0-9]+}}, 12, [[LANE]]
+; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, 0, [[LANE]]
+define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep.in = getelementptr <2 x i64>, ptr addrspace(1) %in, i32 %tid
+  %args = load <2 x i64>, ptr addrspace(1) %gep.in
+  %oldval = load i64, ptr addrspace(1) %out
+  %lane = extractelement <2 x i64> %args, i32 1
+  %lane32 = trunc i64 %lane to i32
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 12, i32 %lane32, i64 %oldval)
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; TODO: fold both SGPR's
+; CHECK-LABEL: {{^}}test_writelane_vreg_lane_f64:
+; CHECK: s_mov_b32 [[SGPR:s[0-9]+]], 0x40280000
+; CHECK: v_readfirstlane_b32 [[LANE:.*]], v{{[0-9]+}}
+; CHECK: v_writelane_b32 v{{[0-9]+}}, 0, [[LANE]]
+; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], [[LANE]]
+define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep.in = getelementptr <2 x double>, ptr addrspace(1) %in, i32 %tid
+  %args = load <2 x double>, ptr addrspace(1) %gep.in
+  %oldval = load double, ptr addrspace(1) %out
+  %lane = extractelement <2 x double> %args, i32 1
+  %lane_cast = bitcast double %lane to i64
+  %lane32 = trunc i64 %lane_cast to i32
+  %writelane = call double @llvm.amdgcn.writelane.f64(double 12.0, i32 %lane32, double %oldval)
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_m0_sreg_i32:
 ; CHECK: s_mov_b32 m0, -1
 ; CIGFX9: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
 ; CIGFX9: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], m0
 ; GFX10: v_writelane_b32 v{{[0-9]+}}, m0, s{{[0-9]+}}
-define amdgpu_kernel void @test_writelane_m0_sreg(ptr addrspace(1) %out, i32 %src1) #1 {
+define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
   %oldval = load i32, ptr addrspace(1) %out
   %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 %m0, i32 %src1, i32 %oldval)
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %m0, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm:
+; CHECK-LABEL: {{^}}test_writelane_imm_i32:
 ; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
-define amdgpu_kernel void @test_writelane_imm(ptr addrspace(1) %out, i32 %src0) #1 {
+define amdgpu_kernel void @test_writelane_imm_i32(ptr addrspace(1) %out, i32 %src0) #1 {
   %oldval = load i32, ptr addrspace(1) %out
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 %src0, i32 32, i32 %oldval) #0
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 32, i32 %oldval) #0
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_oldval:
+; CHECK-LABEL: {{^}}test_writelane_imm_i64:
+; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
+; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
+define amdgpu_kernel void @test_writelane_imm_i64(ptr addrspace(1) %out, i64 %src0) #1 {
+  %oldval = load i64, ptr addrspace(1) %out
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 32, i64 %oldval) #0
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_imm_f64:
+; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
+; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
+define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double %src0) #1 {
+  %oldval = load double, ptr addrspace(1) %out
+  %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 32, double %oldval) #0
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_i32:
 ; CHECK: v_mov_b32_e32 [[OLDVAL:v[0-9]+]], s{{[0-9]+}}
 ; CIGFX9: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, m0
 ; GFX10: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, s{{[0-9]+}}
-define amdgpu_kernel void @test_writelane_sreg_oldval(i32 inreg %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 %src0, i32 %src1, i32 %oldval)
+define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_oldval:
+; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_i64:
+; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], s{{[0-9]+}}
+; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], s{{[0-9]+}}
+; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 %oldval)
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_f64:
+; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], s{{[0-9]+}}
+; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], s{{[0-9]+}}
+; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval, ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+  %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double %oldval)
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_imm_oldval_i32:
 ; CHECK: v_mov_b32_e32 [[OLDVAL:v[0-9]+]], 42
 ; CIGFX9: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, m0
 ; GFX10: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, s{{[0-9]+}}
-define amdgpu_kernel void @test_writelane_imm_oldval(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
-  %writelane = call i32 @llvm.amdgcn.writelane(i32 %src0, i32 %src1, i32 42)
+define amdgpu_kernel void @test_writelane_imm_oldval_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+  %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 42)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
+; CHECK-LABEL: {{^}}test_writelane_imm_oldval_i64:
+; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], 42
+; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], 0
+; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_imm_oldval_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+  %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 42)
+  store i64 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_writelane_imm_oldval_f64:
+; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], 0
+; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], 0x40450000
+; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
+; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
+; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
+; GFX10-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
+define amdgpu_kernel void @test_writelane_imm_oldval_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+  %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double 42.0)
+  store double %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #2
 
 attributes #0 = { nounwind readnone convergent }

>From 776a4c667044edb75da9bfc21d88e54cac9b221e Mon Sep 17 00:00:00 2001
From: vikramRH <vikhegde at amd.com>
Date: Mon, 22 Apr 2024 09:58:11 +0000
Subject: [PATCH 09/11] address review comments

---
 clang/lib/CodeGen/CGBuiltin.cpp | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 980d8c0887c4e4..2def06a9a3880c 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18411,23 +18411,9 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     return Builder.CreateCall(F, Args);
   }
   case AMDGPU::BI__builtin_amdgcn_readlane:
-  case AMDGPU::BI__builtin_amdgcn_readfirstlane: {
-    llvm::SmallVector<llvm::Value *, 6> Args;
-    unsigned ICEArguments = 0;
-    ASTContext::GetBuiltinTypeError Error;
-    Intrinsic::ID IID = (BuiltinID == AMDGPU::BI__builtin_amdgcn_readlane)
-                            ? Intrinsic::amdgcn_readlane
-                            : Intrinsic::amdgcn_readfirstlane;
-
-    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
-    assert(Error == ASTContext::GE_None && "Should not codegen an error");
-    for (unsigned I = 0; I != E->getNumArgs(); ++I) {
-      Args.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, I, E));
-    }
-
-    Function *F = CGM.getIntrinsic(IID, Args[0]->getType());
-    return Builder.CreateCall(F, Args);
-  }
+    return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_readlane);
+  case AMDGPU::BI__builtin_amdgcn_readfirstlane:
+    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_readfirstlane);
   case AMDGPU::BI__builtin_amdgcn_div_fixup:
   case AMDGPU::BI__builtin_amdgcn_div_fixupf:
   case AMDGPU::BI__builtin_amdgcn_div_fixuph:

>From 82da530567a2e5facfdac0ee7707527a15289bb8 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Thu, 2 May 2024 17:07:30 +0000
Subject: [PATCH 10/11] Implement lowering in legalizer for legal types

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |    3 +
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |    4 +
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |   16 +
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |   92 +
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h  |    3 +
 llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp    |    3 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  177 +-
 llvm/lib/Target/AMDGPU/SIInstructions.td      |   37 +-
 llvm/lib/Target/AMDGPU/VOP1Instructions.td    |    2 +-
 llvm/lib/Target/AMDGPU/VOP2Instructions.td    |    4 +-
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       |  365 ++-
 .../CodeGen/AMDGPU/llvm.amdgcn.readlane.ll    |  593 +++-
 .../CodeGen/AMDGPU/llvm.amdgcn.writelane.ll   | 2585 ++++++++++++++++-
 13 files changed, 3493 insertions(+), 391 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index db69d50799e70b..b2f4e9c3eda59c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5459,6 +5459,9 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(LDS)
   NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
   NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
+  NODE_NAME_CASE(READLANE)
+  NODE_NAME_CASE(READFIRSTLANE)
+  NODE_NAME_CASE(WRITELANE)
   NODE_NAME_CASE(DUMMY_CHAIN)
   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
   NODE_NAME_CASE(LOAD_D16_HI)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index f10a357125e562..624f0ffa508441 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -541,6 +541,10 @@ enum NodeType : unsigned {
   FPTRUNC_ROUND_UPWARD,
   FPTRUNC_ROUND_DOWNWARD,
 
+  READLANE,
+  READFIRSTLANE,
+  WRITELANE,
+
   DUMMY_CHAIN,
   FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
   LOAD_D16_HI,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 82f58ea38fd0a7..a591fe76ff48ef 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -342,6 +342,10 @@ def AMDGPUfdot2_impl : SDNode<"AMDGPUISD::FDOT2",
 
 def AMDGPUperm_impl : SDNode<"AMDGPUISD::PERM", AMDGPUDTIntTernaryOp, []>;
 
+def AMDGPUreadlane_impl : SDNode<"AMDGPUISD::READLANE", SDTIntBinOp>;
+def AMDGPUreadfirstlane_impl : SDNode<"AMDGPUISD::READFIRSTLANE", SDTIntUnaryOp>;
+def AMDGPUwritelane_impl : SDNode<"AMDGPUISD::WRITELANE", AMDGPUDTIntTernaryOp>;
+
 // SI+ export
 def AMDGPUExportOp : SDTypeProfile<0, 8, [
   SDTCisInt<0>,       // i8 tgt
@@ -504,3 +508,15 @@ def AMDGPUdiv_fmas : PatFrags<(ops node:$src0, node:$src1, node:$src2, node:$vcc
 def AMDGPUperm : PatFrags<(ops node:$src0, node:$src1, node:$src2),
   [(int_amdgcn_perm node:$src0, node:$src1, node:$src2),
    (AMDGPUperm_impl node:$src0, node:$src1, node:$src2)]>;
+
+def AMDGPUreadlane : PatFrags<(ops node:$src0, node:$src1),
+  [(int_amdgcn_readlane node:$src0, node:$src1),
+   (AMDGPUreadlane_impl node:$src0, node:$src1)]>;
+
+def AMDGPUreadfirstlane : PatFrags<(ops node:$src),
+  [(int_amdgcn_readfirstlane node:$src),
+   (AMDGPUreadfirstlane_impl node:$src)]>;
+
+def AMDGPUwritelane : PatFrags<(ops node:$src0, node:$src1, node:$src2),
+  [(int_amdgcn_writelane node:$src0, node:$src1, node:$src2),
+   (AMDGPUwritelane_impl node:$src0, node:$src1, node:$src2)]>;
\ No newline at end of file
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e55d1de01b4fd1..8f0286164a7f11 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -5386,6 +5386,94 @@ bool AMDGPULegalizerInfo::legalizeDSAtomicFPIntrinsic(LegalizerHelper &Helper,
   return true;
 }
 
+bool AMDGPULegalizerInfo::legalizeLaneOp(LegalizerHelper &Helper,
+                                         MachineInstr &MI,
+                                         Intrinsic::ID IID) const {
+
+  MachineIRBuilder &B = Helper.MIRBuilder;
+  MachineRegisterInfo &MRI = *B.getMRI();
+
+  Register DstReg = MI.getOperand(0).getReg();
+  Register Src0 = MI.getOperand(2).getReg();
+
+  LLT Ty = MRI.getType(DstReg);
+  unsigned Size = Ty.getSizeInBits();
+
+  if (Size == 32)
+    return true;
+
+  if (Size < 32) {
+    auto Ext = B.buildAnyExt(LLT::scalar(32), Src0).getReg(0);
+    auto LaneOpDst =
+        B.buildIntrinsic(Intrinsic::amdgcn_readlane, {S32}).addUse(Ext);
+    if (IID == Intrinsic::amdgcn_readlane ||
+        IID == Intrinsic::amdgcn_writelane) {
+      auto Src1 = MI.getOperand(3).getReg();
+      LaneOpDst = LaneOpDst.addUse(Src1);
+      if (IID == Intrinsic::amdgcn_writelane) {
+        auto Src2 = MI.getOperand(4).getReg();
+        auto Ext2 = B.buildAnyExt(LLT::scalar(32), Src2).getReg(0);
+        LaneOpDst = LaneOpDst.addUse(Ext2);
+      }
+    }
+    B.buildTrunc(DstReg, LaneOpDst).getReg(0);
+  } else if ((Size % 32) == 0) {
+    SmallVector<Register, 2> Src0Parts, PartialRes;
+    unsigned NumParts = Size / 32;
+    auto WideReg = MRI.createGenericVirtualRegister(LLT::scalar(NumParts * 32));
+    for (unsigned i = 0; i < NumParts; ++i) {
+      Src0Parts.push_back(MRI.createGenericVirtualRegister(S32));
+    }
+
+    B.buildUnmerge(Src0Parts, Src0);
+
+    switch (IID) {
+    case Intrinsic::amdgcn_readlane: {
+      auto Src1 = MI.getOperand(3).getReg();
+      for (unsigned i = 0; i < NumParts; ++i)
+        PartialRes.push_back(
+            (B.buildIntrinsic(Intrinsic::amdgcn_readlane, {S32})
+                 .addUse(Src0Parts[i])
+                 .addUse(Src1))
+                .getReg(0));
+      break;
+    }
+    case Intrinsic::amdgcn_readfirstlane: {
+
+      for (unsigned i = 0; i < NumParts; ++i)
+        PartialRes.push_back(
+            (B.buildIntrinsic(Intrinsic::amdgcn_readfirstlane, {S32})
+                 .addUse(Src0Parts[i]))
+                .getReg(0));
+
+      break;
+    }
+    case Intrinsic::amdgcn_writelane: {
+      auto Src1 = MI.getOperand(3).getReg();
+      auto Src2 = MI.getOperand(4).getReg();
+      SmallVector<Register, 2> Src2Parts;
+      for (unsigned i = 0; i < NumParts; ++i) {
+        Src2Parts.push_back(MRI.createGenericVirtualRegister(S32));
+      }
+      B.buildUnmerge(Src2Parts, Src2);
+
+      for (unsigned i = 0; i < NumParts; ++i)
+        PartialRes.push_back(
+            (B.buildIntrinsic(Intrinsic::amdgcn_writelane, {S32})
+                 .addUse(Src0Parts[i])
+                 .addUse(Src1)
+                 .addUse(Src2Parts[i]))
+                .getReg(0));
+    }
+    }
+    B.buildMergeLikeInstr(DstReg, PartialRes);
+  } else
+    return false;
+
+  MI.eraseFromParent();
+  return true;
+}
+
 bool AMDGPULegalizerInfo::getImplicitArgPtr(Register DstReg,
                                             MachineRegisterInfo &MRI,
                                             MachineIRBuilder &B) const {
@@ -7319,6 +7407,10 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     Observer.changedInstr(MI);
     return true;
   }
+  case Intrinsic::amdgcn_readlane:
+  case Intrinsic::amdgcn_writelane:
+  case Intrinsic::amdgcn_readfirstlane:
+    return legalizeLaneOp(Helper, MI, IntrID);
   default: {
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrID))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index e5ba84a74a0f8a..40e056154527f2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -208,6 +208,9 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
   bool legalizeBufferAtomic(MachineInstr &MI, MachineIRBuilder &B,
                             Intrinsic::ID IID) const;
 
+  bool legalizeLaneOp(LegalizerHelper &Helper, MachineInstr &MI,
+                      Intrinsic::ID IID) const;
+
   bool legalizeBVHIntrinsic(MachineInstr &MI, MachineIRBuilder &B) const;
 
   bool legalizeFPTruncRound(MachineInstr &MI, MachineIRBuilder &B) const;
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index d722b6fb56bccb..8b21c22b449710 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -691,8 +691,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
 
         break;
       }
-      case AMDGPU::V_WRITELANE_B32:
-      case AMDGPU::V_WRITELANE_PSEUDO_B64: {
+      case AMDGPU::V_WRITELANE_B32: {
         // Some architectures allow more than one constant bus access without
         // SGPR restriction
         if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ae928a6813a842..83f77c916834b3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4822,111 +4822,6 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   return RetBB;
 }
 
-static MachineBasicBlock *lowerPseudoLaneOp(MachineInstr &MI,
-                                            MachineBasicBlock *BB,
-                                            const GCNSubtarget &ST,
-                                            unsigned Opc) {
-  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-  const SIRegisterInfo *TRI = ST.getRegisterInfo();
-  const DebugLoc &DL = MI.getDebugLoc();
-  const SIInstrInfo *TII = ST.getInstrInfo();
-
-  MachineOperand &Dest = MI.getOperand(0);
-  MachineOperand &Src0 = MI.getOperand(1);
-
-  const TargetRegisterClass *Src0RC =
-      Src0.isReg() ? MRI.getRegClass(Src0.getReg()) : &AMDGPU::SReg_64RegClass;
-  const TargetRegisterClass *Src0SubRC =
-      TRI->getSubRegisterClass(Src0RC, AMDGPU::sub0);
-
-  Register DestSub0 = MRI.createVirtualRegister(
-      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass
-                                              : &AMDGPU::SGPR_32RegClass);
-  Register DestSub1 = MRI.createVirtualRegister(
-      (Opc == AMDGPU::V_WRITELANE_PSEUDO_B64) ? &AMDGPU::VGPR_32RegClass
-                                              : &AMDGPU::SGPR_32RegClass);
-
-  MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm(
-      MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
-
-  MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm(
-      MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
-
-  MachineInstr *LoHalf, *HighHalf;
-  switch (Opc) {
-  case AMDGPU::V_READLANE_PSEUDO_B64: {
-    MachineOperand &Src1 = MI.getOperand(2);
-    auto IsKill = (Src1.isReg() && Src1.isKill());
-    if (IsKill)
-      Src1.setIsKill(false);
-    LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub0)
-                 .add(SrcReg0Sub0)
-                 .add(Src1);
-
-    if (IsKill)
-      Src1.setIsKill(true);
-    HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), DestSub1)
-                   .add(SrcReg0Sub1)
-                   .add(Src1);
-    break;
-  }
-  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64: {
-    LoHalf =
-        BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub0)
-            .add(SrcReg0Sub0);
-    HighHalf =
-        BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), DestSub1)
-            .add(SrcReg0Sub1);
-    break;
-  }
-  case AMDGPU::V_WRITELANE_PSEUDO_B64: {
-    MachineOperand &Src1 = MI.getOperand(2);
-    MachineOperand &Src2 = MI.getOperand(3);
-    auto IsKill = (Src1.isReg() && Src1.isKill());
-
-    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
-    const TargetRegisterClass *Src2SubRC =
-        TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-
-    MachineOperand SrcReg2Sub0 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src2, Src2RC, AMDGPU::sub0, Src2SubRC);
-
-    MachineOperand SrcReg2Sub1 = TII->buildExtractSubRegOrImm(
-        MI, MRI, Src2, Src2RC, AMDGPU::sub1, Src2SubRC);
-
-    if (IsKill)
-      Src1.setIsKill(false);
-
-    LoHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub0)
-                 .add(SrcReg0Sub0)
-                 .add(Src1)
-                 .add(SrcReg2Sub0);
-
-    if (IsKill)
-      Src1.setIsKill(true);
-    HighHalf = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), DestSub1)
-                   .add(SrcReg0Sub1)
-                   .add(Src1)
-                   .add(SrcReg2Sub1);
-    break;
-  }
-  default:
-    llvm_unreachable("should not occur");
-  }
-
-  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg())
-      .addReg(DestSub0)
-      .addImm(AMDGPU::sub0)
-      .addReg(DestSub1)
-      .addImm(AMDGPU::sub1);
-
-  TII->legalizeOperands(*LoHalf);
-  TII->legalizeOperands(*HighHalf);
-
-  MI.eraseFromParent();
-  return BB;
-}
-
 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
   MachineInstr &MI, MachineBasicBlock *BB) const {
 
@@ -5170,10 +5065,6 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MI.eraseFromParent();
     return BB;
   }
-  case AMDGPU::V_READLANE_PSEUDO_B64:
-  case AMDGPU::V_READFIRSTLANE_PSEUDO_B64:
-  case AMDGPU::V_WRITELANE_PSEUDO_B64:
-    return lowerPseudoLaneOp(MI, BB, *getSubtarget(), MI.getOpcode());
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
@@ -6091,6 +5982,70 @@ static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
       DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
 }
 
+static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
+                           SelectionDAG &DAG) {
+  auto VT = N->getValueType(0);
+  unsigned ValSize = VT.getSizeInBits();
+  unsigned IntrinsicID = N->getConstantOperandVal(0);
+  SDValue Src0 = N->getOperand(1);
+  SDLoc SL(N);
+  MVT IntVT = MVT::getIntegerVT(ValSize);
+
+  auto createLaneOp = [&](SDValue &Src0, SDValue &Src1, SDValue &Src2,
+                          MVT VT) -> SDValue {
+    return (Src2.getNode()
+                ? DAG.getNode(AMDGPUISD::WRITELANE, SL, VT, {Src0, Src1, Src2})
+            : Src1.getNode()
+                ? DAG.getNode(AMDGPUISD::READLANE, SL, VT, {Src0, Src1})
+                : DAG.getNode(AMDGPUISD::READFIRSTLANE, SL, VT, {Src0}));
+  };
+
+  SDValue Src1, Src2, Src0Valid, Src2Valid;
+  if (IntrinsicID == Intrinsic::amdgcn_readlane ||
+      IntrinsicID == Intrinsic::amdgcn_writelane) {
+    Src1 = N->getOperand(2);
+    if (IntrinsicID == Intrinsic::amdgcn_writelane)
+      Src2 = N->getOperand(3);
+  }
+
+  if (ValSize == 32) {
+    if (VT == MVT::i32)
+      // Already legal
+      return SDValue();
+    Src0Valid = DAG.getBitcast(IntVT, Src0);
+    if (Src2.getNode())
+      Src2Valid = DAG.getBitcast(IntVT, Src2);
+    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
+    return DAG.getBitcast(VT, LaneOp);
+  }
+
+  if (ValSize < 32) {
+    auto InitBitCast = DAG.getBitcast(IntVT, Src0);
+    Src0Valid = DAG.getAnyExtOrTrunc(InitBitCast, SL, MVT::i32);
+    if (Src2.getNode()) {
+      auto Src2Cast = DAG.getBitcast(IntVT, Src2);
+      Src2Valid = DAG.getAnyExtOrTrunc(Src2Cast, SL, MVT::i32);
+    }
+    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
+    auto Trunc = DAG.getAnyExtOrTrunc(LaneOp, SL, IntVT);
+    return DAG.getBitcast(VT, Trunc);
+  }
+
+  if ((ValSize % 32) == 0) {
+    MVT VecVT = MVT::getVectorVT(MVT::i32, ValSize / 32);
+    Src0Valid = DAG.getBitcast(VecVT, Src0);
+
+    if (Src2.getNode())
+      Src2Valid = DAG.getBitcast(VecVT, Src2);
+
+    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, VecVT);
+    auto UnrolledLaneOp = DAG.UnrollVectorOp(LaneOp.getNode());
+    return DAG.getBitcast(VT, UnrolledLaneOp);
+  }
+
+  return SDValue();
+}
+
 void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
@@ -8553,6 +8508,10 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
   case Intrinsic::amdgcn_addrspacecast_nonnull:
     return lowerADDRSPACECAST(Op, DAG);
+  case Intrinsic::amdgcn_readlane:
+  case Intrinsic::amdgcn_readfirstlane:
+  case Intrinsic::amdgcn_writelane:
+    return lowerLaneOp(*this, Op.getNode(), DAG);
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index e8ece71fe07b7b..6a3b6bf83f096a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -288,41 +288,6 @@ def V_SUB_U64_PSEUDO : VPseudoInstSI <
 >;
 } // End usesCustomInserter = 1, Defs = [VCC]
 
-
-let usesCustomInserter = 1 in {
-  def V_READLANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs SReg_64:$sdst), (ins VReg_64:$src0, SSrc_b32:$src1)>;
-
-  def V_READFIRSTLANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs SReg_64:$sdst), (ins VReg_64:$src0)>;
-  
-  def V_WRITELANE_PSEUDO_B64 : VPseudoInstSI <
-  (outs VReg_64:$sdst), (ins SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2)> {
-    let UseNamedOperandTable = 1;
-  }
-} // End usesCustomInserter = 1
-
-class ReadLanePseudoPat <ValueType vt> : GCNPat <
-  (vt (int_amdgcn_readlane vt:$src0, i32:$src1)),
-  (V_READLANE_PSEUDO_B64 VReg_64:$src0, SSrc_b32:$src1)>;
-
-def : ReadLanePseudoPat<i64>;
-def : ReadLanePseudoPat<f64>;
-
-class WriteLanePseudoPat <ValueType vt> : GCNPat <
-  (vt (int_amdgcn_writelane vt:$src0, i32:$src1, vt:$src2)),
-  (V_WRITELANE_PSEUDO_B64 SReg_64:$src0, SSrc_b32:$src1, VReg_64:$src2)>;
-
-def : WriteLanePseudoPat<i64>;
-def : WriteLanePseudoPat<f64>;
-
-class ReadFirstLanePseudoPat <ValueType vt> : GCNPat <
-  (vt (int_amdgcn_readfirstlane vt:$src0)),
-  (V_READFIRSTLANE_PSEUDO_B64 VReg_64:$src0)>;
-
-def : ReadFirstLanePseudoPat<i64>;
-def : ReadFirstLanePseudoPat<f64>;
-
 let usesCustomInserter = 1, Defs = [SCC] in {
 def S_ADD_U64_PSEUDO : SPseudoInstSI <
   (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1),
@@ -3440,7 +3405,7 @@ def : GCNPat<
 // FIXME: Should also do this for readlane, but tablegen crashes on
 // the ignored src1.
 def : GCNPat<
-  (i32 (int_amdgcn_readfirstlane (i32 imm:$src))),
+  (i32 (AMDGPUreadfirstlane (i32 imm:$src))),
   (S_MOV_B32 SReg_32:$src)
 >;
 
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 0ee80f45c91603..e9059c617ee9f3 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -243,7 +243,7 @@ def VOP_READFIRSTLANE : VOPProfile <[i32, i32, untyped, untyped]> {
 // FIXME: Specify SchedRW for READFIRSTLANE_B32
 // TODO: There is VOP3 encoding also
 def V_READFIRSTLANE_B32 : VOP1_Pseudo <"v_readfirstlane_b32", VOP_READFIRSTLANE,
-                                       getVOP1Pat<int_amdgcn_readfirstlane,
+                                       getVOP1Pat<AMDGPUreadfirstlane,
                                                   VOP_READFIRSTLANE>.ret, 1> {
   let isConvergent = 1;
 }
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index c001c5de81e0b7..34dee083c33a33 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -781,10 +781,10 @@ defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag,
 // These are special and do not read the exec mask.
 let isConvergent = 1, Uses = []<Register> in {
 def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE,
-  [(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))]>;
+  [(set i32:$vdst, (AMDGPUreadlane i32:$src0, i32:$src1))]>;
 let IsNeverUniform = 1, Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
 def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE,
-  [(set i32:$vdst, (int_amdgcn_writelane i32:$src0, i32:$src1, i32:$vdst_in))]>;
+  [(set i32:$vdst, (AMDGPUwritelane i32:$src0, i32:$src1, i32:$vdst_in))]>;
 } // End IsNeverUniform, $vdst = $vdst_in, DisableEncoding $vdst_in
 } // End isConvergent = 1
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index c5e0e9ffd3a9b4..08447f2a395acc 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -1,160 +1,387 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-SDAG -enable-var-scope %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -global-isel < %s | FileCheck -check-prefix=CHECK-GISEL -enable-var-scope %s
 
 declare i32 @llvm.amdgcn.readfirstlane(i32) #0
 declare i64 @llvm.amdgcn.readfirstlane.i64(i64) #0
 declare double @llvm.amdgcn.readfirstlane.f64(double) #0
 
-; CHECK-LABEL: {{^}}test_readfirstlane_i32:
-; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v2
 define void @test_readfirstlane_i32(ptr addrspace(1) %out, i32 %src) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %src)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_i64:
-; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
-; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
 define void @test_readfirstlane_i64(ptr addrspace(1) %out, i64 %src) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s5
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s4
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %src)
   store i64 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_f64:
-; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
-; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
 define void @test_readfirstlane_f64(ptr addrspace(1) %out, double %src) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s5
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s4
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s5
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
   %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double %src)
   store double %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_i32:
-; CHECK: s_mov_b32 [[SGPR_VAL:s[0-9]]], 32
-; CHECK-NOT: [[SGPR_VAL]]
-; CHECK: ; use [[SGPR_VAL]]
 define amdgpu_kernel void @test_readfirstlane_imm_i32(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_mov_b32 s0, 32
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_mov_b32 s0, 32
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 32)
   call void asm sideeffect "; use $0", "s"(i32 %readfirstlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_i64:
-; CHECK: s_mov_b64 [[SGPR_VAL:s\[[0-9]+:[0-9]+\]]], 32
-; CHECK: use [[SGPR_VAL]]
 define amdgpu_kernel void @test_readfirstlane_imm_i64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_mov_b64 s[0:1], 32
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_mov_b64 s[0:1], 32
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 32)
   call void asm sideeffect "; use $0", "s"(i64 %readfirstlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_f64:
-; CHECK: s_mov_b32 s[[VAL0:[0-9]+]], 0
-; CHECK: s_mov_b32 s[[VAL1:[0-9]+]], 0x40400000
-; use s[[VAL0\:VAL1]]
 define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_mov_b32 s0, 0
+; CHECK-SDAG-NEXT:    s_mov_b32 s1, 0x40400000
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_mov_b32 s0, 0
+; CHECK-GISEL-NEXT:    s_mov_b32 s1, 0x40400000
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double 32.0)
   call void asm sideeffect "; use $0", "s"(double %readfirstlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold_i32:
-; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
-; CHECK-NOT: [[VVAL]]
-; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]]
 define amdgpu_kernel void @test_readfirstlane_imm_fold_i32(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, 32
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, 32
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 32)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_imm_fold_i64:
-; CHECK: s_mov_b64 s[[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]], 32
-; CHECK: v_mov_b32_e32 v[[RES0:[0-9]+]], s[[VAL0]]
-; CHECK: v_mov_b32_e32 v[[RES1:[0-9]+]], s[[VAL1]]
-; CHECK: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RES0]]:[[RES1]]]
 define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 32
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 32
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 32)
   store i64 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK: s_mov_b32 s[[VAL0:[0-9]+]], 0
-; CHECK: s_mov_b32 s[[VAL1:[0-9]+]], 0x40400000
-; CHECK: v_mov_b32_e32 v[[RES0:[0-9]+]], s[[VAL0]]
-; CHECK: v_mov_b32_e32 v[[RES1:[0-9]+]], s[[VAL1]]
-; CHECK: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RES0]]:[[RES1]]]
 define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_imm_fold_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40400000
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0
+; CHECK-GISEL-NEXT:    s_mov_b32 s3, 0x40400000
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double 32.0)
   store double %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_m0:
-; CHECK: s_mov_b32 m0, -1
-; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
-; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]]
 define amdgpu_kernel void @test_readfirstlane_m0(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_m0:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b32 m0, -1
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, m0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_m0:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b32 m0, -1
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, m0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %m0)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_i32:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b32 [[SGPR:s[0-9]+]]
-; CHECK: ;;#ASMEND
-; CHECK-NOT: [[SGPR]]
-; CHECK-NOT: readfirstlane
-; CHECK: v_mov_b32_e32 [[VCOPY:v[0-9]+]], [[SGPR]]
-; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VCOPY]]
 define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i32(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b32 s2, 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call i32 asm "s_mov_b32 $0, 0", "=s"()
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %sgpr)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_i64:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
-; CHECK: ;;#ASMEND
-; CHECK: v_readfirstlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}
-; CHECK: v_readfirstlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
-; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call i64 asm "s_mov_b64 $0, 0", "=s"()
   %readfirstlane = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %sgpr)
   store i64 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readfirstlane_copy_from_sgpr_f64:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
-; CHECK: ;;#ASMEND
-; CHECK: v_readfirstlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}
-; CHECK: v_readfirstlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
-; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_copy_from_sgpr_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call double asm "s_mov_b64 $0, 0", "=s"()
   %readfirstlane = call double @llvm.amdgcn.readfirstlane.f64(double %sgpr)
   store double %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; Make sure this doesn't crash.
-; CHECK-LABEL: {{^}}test_readfirstlane_fi:
-; CHECK: s_mov_b32 [[FIVAL:s[0-9]]], 0
 define amdgpu_kernel void @test_readfirstlane_fi(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readfirstlane_fi:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-SDAG-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-SDAG-NEXT:    s_mov_b32 s4, 0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_fi:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-GISEL-NEXT:    s_mov_b32 s4, 0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %alloca = alloca i32, addrspace(5)
   %int = ptrtoint ptr addrspace(5) %alloca to i32
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %int)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 49bb8ca262e85b..7d2454182e8ec2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -1,98 +1,301 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-SDAG -enable-var-scope %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -global-isel < %s | FileCheck --check-prefix=CHECK-GISEL -enable-var-scope %s
 
 declare i32 @llvm.amdgcn.readlane.i32(i32, i32) #0
 declare i64 @llvm.amdgcn.readlane.i64(i64, i32) #0
 declare double @llvm.amdgcn.readlane.f64(double, i32) #0
 
-; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_i32:
-; CHECK-NOT: v_readlane_b32
 define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %src0, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i32 %readlane)
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_i64:
-; CHECK: v_mov_b32_e32 [[VREG0:v[0-9]+]], {{s[0-9]+}}
-; CHECK: v_mov_b32_e32 [[VREG1:v[0-9]+]], {{s[0-9]+}}
-; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG0]], {{s[0-9]+}}
-; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG1]], {{s[0-9]+}}
 define amdgpu_kernel void @test_readlane_sreg_sreg_i64(i64 %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %src0, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i64 %readlane)
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readlane_sreg_sreg_f64:
-; CHECK: v_mov_b32_e32 [[VREG0:v[0-9]+]], {{s[0-9]+}}
-; CHECK: v_mov_b32_e32 [[VREG1:v[0-9]+]], {{s[0-9]+}}
-; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG0]], {{s[0-9]+}}
-; CHECK: v_readlane_b32 {{s[0-9]+}}, [[VREG1]], {{s[0-9]+}}
 define amdgpu_kernel void @test_readlane_sreg_sreg_f64(double %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call double @llvm.amdgcn.readlane.f64(double %src0, i32 %src1)
   call void asm sideeffect "; use $0", "s"(double %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_i32:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_readlane_vreg_sreg_i32(i32 %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vreg_sreg_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dword s0, s[4:5], 0x4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vreg_sreg_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dword s0, s[4:5], 0x4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call i32 asm sideeffect "; def $0", "=v"()
   %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %vgpr, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i32 %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_i64:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_readlane_vreg_sreg_i64(i64 %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vreg_sreg_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dword s0, s[4:5], 0x8
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readlane_b32 s1, v1, s0
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vreg_sreg_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dword s1, s[4:5], 0x8
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readlane_b32 s0, v0, s1
+; CHECK-GISEL-NEXT:    v_readlane_b32 s1, v1, s1
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call i64 asm sideeffect "; def $0", "=v"()
   %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %vgpr, i32 %src1)
   call void asm sideeffect "; use $0", "s"(i64 %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vreg_sreg_f64:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_readlane_vreg_sreg_f64(double %src0, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vreg_sreg_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dword s0, s[4:5], 0x8
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readlane_b32 s1, v1, s0
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vreg_sreg_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dword s1, s[4:5], 0x8
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readlane_b32 s0, v0, s1
+; CHECK-GISEL-NEXT:    v_readlane_b32 s1, v1, s1
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call double asm sideeffect "; def $0", "=v"()
   %readlane = call double @llvm.amdgcn.readlane.f64(double %vgpr, i32 %src1)
   call void asm sideeffect "; use $0", "s"(double %readlane)
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_imm_sreg_i32:
-; CHECK-NOT: v_readlane_b32
 define amdgpu_kernel void @test_readlane_imm_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_imm_sreg_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, 32
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_imm_sreg_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, 32
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 32, i32 %src1)
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_imm_sreg_i64:
-; CHECK-NOT: v_readlane_b32
 define amdgpu_kernel void @test_readlane_imm_sreg_i64(ptr addrspace(1) %out, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_imm_sreg_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 32
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_imm_sreg_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 32
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 32, i32 %src1)
   store i64 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_imm_sreg_f64:
-; CHECK-NOT: v_readlane_b32
 define amdgpu_kernel void @test_readlane_imm_sreg_f64(ptr addrspace(1) %out, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_imm_sreg_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40400000
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_imm_sreg_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0
+; CHECK-GISEL-NEXT:    s_mov_b32 s3, 0x40400000
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %readlane = call double @llvm.amdgcn.readlane.f64(double 32.0, i32 %src1)
   store double %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vregs_i32:
-; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
 define amdgpu_kernel void @test_readlane_vregs_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vregs_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; CHECK-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-SDAG-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s0, v1
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    flat_store_dword v[2:3], v0
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vregs_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; CHECK-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s2, v1
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i32>, ptr addrspace(1) %gep.in
@@ -103,11 +306,49 @@ define amdgpu_kernel void @test_readlane_vregs_i32(ptr addrspace(1) %out, ptr ad
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vregs_i64:
-; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
 define amdgpu_kernel void @test_readlane_vregs_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vregs_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; CHECK-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-SDAG-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v4, s1
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s1, v1, s0
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[3:4], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vregs_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; CHECK-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-GISEL-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s3, v2
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, s3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s3, v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i64>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i64>, ptr addrspace(1) %gep.in
@@ -119,11 +360,49 @@ define amdgpu_kernel void @test_readlane_vregs_i64(ptr addrspace(1) %out, ptr ad
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vregs_f64:
-; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, [[LANE]]
 define amdgpu_kernel void @test_readlane_vregs_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vregs_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; CHECK-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-SDAG-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v4, s1
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s1, v1, s0
+; CHECK-SDAG-NEXT:    v_readlane_b32 s0, v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[3:4], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vregs_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; CHECK-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-GISEL-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s3, v2
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, s3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s3, v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x double>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x double>, ptr addrspace(1) %gep.in
@@ -136,90 +415,242 @@ define amdgpu_kernel void @test_readlane_vregs_f64(ptr addrspace(1) %out, ptr ad
   ret void
 }
 
-; TODO: m0 should be folded.
-; CHECK-LABEL: {{^}}test_readlane_m0_sreg:
-; CHECK: s_mov_b32 m0, -1
-; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
-; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VVAL]]
 define amdgpu_kernel void @test_readlane_m0_sreg(ptr addrspace(1) %out, i32 %src1) #1 {
+; CHECK-SDAG-LABEL: test_readlane_m0_sreg:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b32 m0, -1
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, m0
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_m0_sreg:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b32 m0, -1
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, m0
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %readlane = call i32 @llvm.amdgcn.readlane(i32 %m0, i32 %src1)
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_i32:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
 define amdgpu_kernel void @test_readlane_vgpr_imm_i32(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vgpr_imm_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_readlane_b32 s2, v0, 32
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vgpr_imm_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, 32
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call i32 asm sideeffect "; def $0", "=v"()
   %readlane = call i32 @llvm.amdgcn.readlane.i32(i32 %vgpr, i32 32) #0
   store i32 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_i64:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
 define amdgpu_kernel void @test_readlane_vgpr_imm_i64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vgpr_imm_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_readlane_b32 s2, v1, 32
+; CHECK-SDAG-NEXT:    v_readlane_b32 s3, v0, 32
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vgpr_imm_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, 32
+; CHECK-GISEL-NEXT:    v_readlane_b32 s3, v1, 32
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call i64 asm sideeffect "; def $0", "=v"()
   %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %vgpr, i32 32) #0
   store i64 %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_vgpr_imm_f64:
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
-; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
 define amdgpu_kernel void @test_readlane_vgpr_imm_f64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_vgpr_imm_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; def v[0:1]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_readlane_b32 s2, v1, 32
+; CHECK-SDAG-NEXT:    v_readlane_b32 s3, v0, 32
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_vgpr_imm_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; def v[0:1]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_readlane_b32 s2, v0, 32
+; CHECK-GISEL-NEXT:    v_readlane_b32 s3, v1, 32
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %vgpr = call double asm sideeffect "; def $0", "=v"()
   %readlane = call double @llvm.amdgcn.readlane.f64(double %vgpr, i32 32) #0
   store double %readlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_i32:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b32 [[SGPR:s[0-9]+]]
-; CHECK: ;;#ASMEND
-; CHECK-NOT: [[SGPR]]
-; CHECK-NOT: readlane
-; CHECK: v_mov_b32_e32 [[VCOPY:v[0-9]+]], [[SGPR]]
-; CHECK: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[VCOPY]]
 define amdgpu_kernel void @test_readlane_copy_from_sgpr_i32(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_copy_from_sgpr_i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b32 s2, 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; CHECK-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call i32 asm "s_mov_b32 $0, 0", "=s"()
   %readfirstlane = call i32 @llvm.amdgcn.readlane.i32(i32 %sgpr, i32 7)
   store i32 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_i64:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
-; CHECK: ;;#ASMEND
-; CHECK: v_readlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}, 7
-; CHECK: v_readlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}, 7
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
-; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_readlane_copy_from_sgpr_i64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_copy_from_sgpr_i64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_i64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call i64 asm "s_mov_b64 $0, 0", "=s"()
   %readfirstlane = call i64 @llvm.amdgcn.readlane.i64(i64 %sgpr, i32 7)
   store i64 %readfirstlane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; TODO: should optimize this as for i32
-; CHECK-LABEL: {{^}}test_readlane_copy_from_sgpr_f64:
-; CHECK: ;;#ASMSTART
-; CHECK-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}
-; CHECK: ;;#ASMEND
-; CHECK: v_readlane_b32 [[SGPR0:s[0-9]+]], {{v[0-9]+}}, 7
-; CHECK: v_readlane_b32 [[SGPR1:s[0-9]+]], {{v[0-9]+}}, 7
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR0]]
-; CHECK: v_mov_b32_e32 {{v[0-9]+}}, [[SGPR1]]
-; CHECK: flat_store_dwordx2 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %out) #1 {
+; CHECK-SDAG-LABEL: test_readlane_copy_from_sgpr_f64:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT:    s_endpgm
+;
+; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_f64:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 0
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-GISEL-NEXT:    s_endpgm
   %sgpr = call double asm "s_mov_b64 $0, 0", "=s"()
   %readfirstlane = call double @llvm.amdgcn.readlane.f64(double %sgpr, i32 7)
   store double %readfirstlane, ptr addrspace(1) %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index 9e7f5eb001a21c..f5f8fa3907f975 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -1,83 +1,993 @@
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,CIGFX9 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx802 -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,CIGFX9 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,GFX10 %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=CHECK,GFX10 %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX700-SDAG %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx802 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX802-SDAG %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX1010-SDAG %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GFX1100-SDAG %s
+
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx700 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX700-GISEL %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx802 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX802-GISEL %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1010 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX1010-GISEL %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 -global-isel < %s | FileCheck -check-prefixes=GFX1100-GISEL %s
 
 declare i32 @llvm.amdgcn.writelane(i32, i32, i32) #0
 declare i64 @llvm.amdgcn.writelane.i64(i64, i32, i64) #0
 declare double @llvm.amdgcn.writelane.f64(double, i32, double) #0
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_i32:
-; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX802-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dword s4, s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b32 s4, s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX802-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dword s4, s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b32 s4, s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i32, ptr addrspace(1) %out
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_i64:
-; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s2, s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s7, s2
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s6, s2
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s2, s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s6, s2
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s7, s2
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i64, ptr addrspace(1) %out
   %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 %oldval)
   store i64 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_f64:
-; CIGFX9: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s2, s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s7, s2
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s6, s2
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s2, s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s6, s2
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s7, s2
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load double, ptr addrspace(1) %out
   %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double %oldval)
   store double %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_sreg_i32:
-; CHECK: v_writelane_b32 v{{[0-9]+}}, 32, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_sreg_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, 32, s2
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_sreg_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, 32, s2
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_sreg_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, 32, s2
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_sreg_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, 32, s0
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_sreg_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 32, s2
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_sreg_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, 32, s2
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_sreg_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, 32, s2
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_sreg_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, 32, s0
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i32, ptr addrspace(1) %out
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 32, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_sreg_i64:
-; CHECK: v_writelane_b32 v{{[0-9]+}}, 32, s{{[0-9]+}}
-; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, 0, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_sreg_i64(ptr addrspace(1) %out, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_sreg_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x2
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_sreg_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x8
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_sreg_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, 0, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, 32, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_sreg_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s4, s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[0:1], s[2:3], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[2:3]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_sreg_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x2
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_sreg_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x8
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_sreg_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, 32, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, 0, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_sreg_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s4, s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[0:1], s[2:3], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, 32, s4
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, 0, s4
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[2:3]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i64, ptr addrspace(1) %out
   %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 32, i32 %src1, i64 %oldval)
   store i64 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; TODO: fold both SGPR's
-; CHECK-LABEL: {{^}}test_writelane_imm_sreg_f64:
-; CHECK: s_mov_b32 [[SGPR:s[0-9]+]], 0x40400000
-; CIGFX9: v_writelane_b32 v{{[0-9]+}}, 0, m0
-; CIGFX9-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, 0, s{{[0-9]+}}
-; GFX10-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_sreg_f64(ptr addrspace(1) %out, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_sreg_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x2
+; GFX700-SDAG-NEXT:    s_mov_b32 s5, 0x40400000
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_sreg_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x8
+; GFX802-SDAG-NEXT:    s_mov_b32 s5, 0x40400000
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_sreg_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-SDAG-NEXT:    s_mov_b32 s2, 0x40400000
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s2, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, 0, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_sreg_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s4, s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[0:1], s[2:3], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-SDAG-NEXT:    s_mov_b32 s0, 0x40400000
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[2:3]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_sreg_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x2
+; GFX700-GISEL-NEXT:    s_mov_b32 s5, 0x40400000
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s4
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_sreg_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x8
+; GFX802-GISEL-NEXT:    s_mov_b32 s5, 0x40400000
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s4
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_sreg_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-GISEL-NEXT:    s_mov_b32 s2, 0x40400000
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, 0, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s2, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_sreg_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s4, s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[0:1], s[2:3], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1100-GISEL-NEXT:    s_mov_b32 s0, 0x40400000
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, 0, s4
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s0, s4
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[2:3]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load double, ptr addrspace(1) %out
   %writelane = call double @llvm.amdgcn.writelane.f64(double 32.0, i32 %src1, double %oldval)
   store double %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_vreg_lane_i32:
-; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: v_writelane_b32 v{{[0-9]+}}, 12, [[LANE]]
 define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; GFX700-SDAG-LABEL: test_writelane_vreg_lane_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    flat_load_dword v0, v[0:1]
+; GFX700-SDAG-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s2
+; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    s_nop 2
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, 12, s2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_vreg_lane_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, 4, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    flat_load_dword v0, v[0:1]
+; GFX802-SDAG-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    s_nop 2
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, 12, s2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_vreg_lane_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dword v0, v0, s[2:3] offset:4
+; GFX1010-SDAG-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-SDAG-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1010-SDAG-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_vreg_lane_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b32 v0, v0, s[2:3] offset:4
+; GFX1100-SDAG-NEXT:    s_load_b32 s2, s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1100-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_vreg_lane_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    flat_load_dword v0, v[0:1]
+; GFX700-GISEL-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    s_nop 2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 12, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_vreg_lane_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, 4, v0
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    flat_load_dword v0, v[0:1]
+; GFX802-GISEL-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    s_nop 2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, 12, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_vreg_lane_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dword v0, v0, s[2:3] offset:4
+; GFX1010-GISEL-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-GISEL-NEXT:    s_load_dword s2, s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1010-GISEL-NEXT:    global_store_dword v0, v1, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_vreg_lane_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b32 v0, v0, s[2:3] offset:4
+; GFX1100-GISEL-NEXT:    s_load_b32 s2, s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1100-GISEL-NEXT:    global_store_b32 v0, v1, s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i32>, ptr addrspace(1) %gep.in
@@ -88,11 +998,184 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, p
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_vreg_lane_i64:
-; CHECK: v_readfirstlane_b32 [[LANE:s[0-9]+]], v{{[0-9]+}}
-; CHECK: v_writelane_b32 v{{[0-9]+}}, 12, [[LANE]]
-; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, 0, [[LANE]]
 define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; GFX700-SDAG-LABEL: test_writelane_vreg_lane_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    flat_load_dword v2, v[0:1]
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    s_nop 2
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 12, s2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_vreg_lane_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, 8, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    flat_load_dword v2, v[0:1]
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    s_nop 2
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, 12, s2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_vreg_lane_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dword v0, v0, s[2:3] offset:8
+; GFX1010-SDAG-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, 0, s3
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, 12, s3
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_vreg_lane_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b32 v0, v0, s[2:3] offset:8
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, 0, s3
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, 12, s3
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_vreg_lane_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v4, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s0
+; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX700-GISEL-NEXT:    s_nop 3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 0, s2
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_vreg_lane_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, 8, v0
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v4, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s0
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX802-GISEL-NEXT:    s_nop 3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, 0, s2
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_vreg_lane_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dwordx2 v[0:1], v0, s[2:3] offset:8
+; GFX1010-GISEL-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v2, 0, s2
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v0, v[1:2], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_vreg_lane_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b64 v[0:1], v0, s[2:3] offset:8
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v2, 0, s2
+; GFX1100-GISEL-NEXT:    global_store_b64 v0, v[1:2], s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x i64>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x i64>, ptr addrspace(1) %gep.in
@@ -104,13 +1187,196 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, p
   ret void
 }
 
-; TODO: fold both SGPR's
-; CHECK-LABEL: {{^}}test_writelane_vreg_lane_f64:
-; CHECK: s_mov_b32 [[SGPR:s[0-9]+]], 0x40280000
-; CHECK: v_readfirstlane_b32 [[LANE:.*]], v{{[0-9]+}}
-; CHECK: v_writelane_b32 v{{[0-9]+}}, 0, [[LANE]]
-; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, [[SGPR]], [[LANE]]
 define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; GFX700-SDAG-LABEL: test_writelane_vreg_lane_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX700-SDAG-NEXT:    s_mov_b32 s4, 0x40280000
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
+; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-SDAG-NEXT:    flat_load_dword v2, v[0:1]
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX700-SDAG-NEXT:    v_readfirstlane_b32 m0, v2
+; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    s_nop 1
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s4, m0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 0, s2
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_vreg_lane_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX802-SDAG-NEXT:    s_mov_b32 s4, 0x40280000
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v0, vcc, 8, v0
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    flat_load_dword v2, v[0:1]
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v2
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s4, m0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, 0, s2
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_vreg_lane_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dword v0, v0, s[2:3] offset:8
+; GFX1010-SDAG-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1010-SDAG-NEXT:    s_mov_b32 s2, 0x40280000
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s2, s3
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, 0, s3
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_vreg_lane_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b32 v0, v0, s[2:3] offset:8
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX1100-SDAG-NEXT:    s_mov_b32 s2, 0x40280000
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s2, s3
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, 0, s3
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_vreg_lane_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; GFX700-GISEL-NEXT:    s_mov_b32 s4, 0x40280000
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
+; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX700-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v4, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s0
+; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s2
+; GFX700-GISEL-NEXT:    s_nop 2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_vreg_lane_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
+; GFX802-GISEL-NEXT:    s_mov_b32 s4, 0x40280000
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v0, vcc, 8, v0
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v4, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s0
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s2
+; GFX802-GISEL-NEXT:    s_nop 2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_vreg_lane_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dwordx2 v[0:1], v0, s[2:3] offset:8
+; GFX1010-GISEL-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX1010-GISEL-NEXT:    s_mov_b32 s3, 0x40280000
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v2, s3, s2
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v0, v[1:2], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_vreg_lane_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b64 v[0:1], v0, s[2:3] offset:8
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX1100-GISEL-NEXT:    s_mov_b32 s3, 0x40280000
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, 0, s2
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v2, s3, s2
+; GFX1100-GISEL-NEXT:    global_store_b64 v0, v[1:2], s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %gep.in = getelementptr <2 x double>, ptr addrspace(1) %in, i32 %tid
   %args = load <2 x double>, ptr addrspace(1) %gep.in
@@ -123,12 +1389,154 @@ define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, p
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_m0_sreg_i32:
-; CHECK: s_mov_b32 m0, -1
-; CIGFX9: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
-; CIGFX9: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], m0
-; GFX10: v_writelane_b32 v{{[0-9]+}}, m0, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_m0_sreg_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-SDAG-NEXT:    ;;#ASMSTART
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, -1
+; GFX700-SDAG-NEXT:    ;;#ASMEND
+; GFX700-SDAG-NEXT:    s_mov_b32 s4, m0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_m0_sreg_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-SDAG-NEXT:    ;;#ASMSTART
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, -1
+; GFX802-SDAG-NEXT:    ;;#ASMEND
+; GFX802-SDAG-NEXT:    s_mov_b32 s4, m0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_m0_sreg_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    ;;#ASMSTART
+; GFX1010-SDAG-NEXT:    s_mov_b32 m0, -1
+; GFX1010-SDAG-NEXT:    ;;#ASMEND
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, m0, s2
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_m0_sreg_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    ;;#ASMSTART
+; GFX1100-SDAG-NEXT:    s_mov_b32 m0, -1
+; GFX1100-SDAG-NEXT:    ;;#ASMEND
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, m0, s0
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_m0_sreg_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-GISEL-NEXT:    ;;#ASMSTART
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, -1
+; GFX700-GISEL-NEXT:    ;;#ASMEND
+; GFX700-GISEL-NEXT:    s_mov_b32 s4, m0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s2
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_m0_sreg_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-GISEL-NEXT:    ;;#ASMSTART
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, -1
+; GFX802-GISEL-NEXT:    ;;#ASMEND
+; GFX802-GISEL-NEXT:    s_mov_b32 s4, m0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s2
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_m0_sreg_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    ;;#ASMSTART
+; GFX1010-GISEL-NEXT:    s_mov_b32 m0, -1
+; GFX1010-GISEL-NEXT:    ;;#ASMEND
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, m0, s2
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_m0_sreg_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    ;;#ASMSTART
+; GFX1100-GISEL-NEXT:    s_mov_b32 m0, -1
+; GFX1100-GISEL-NEXT:    ;;#ASMEND
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, m0, s0
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i32, ptr addrspace(1) %out
   %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %m0, i32 %src1, i32 %oldval)
@@ -136,102 +1544,1097 @@ define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_i32:
-; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
 define amdgpu_kernel void @test_writelane_imm_i32(ptr addrspace(1) %out, i32 %src0) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, 32
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s2, 32
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s0, 32
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, 32
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s3
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s2, 32
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s3
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b32 s1, s[2:3], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s0, 32
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[2:3]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i32, ptr addrspace(1) %out
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 32, i32 %oldval) #0
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_i64:
-; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
-; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
 define amdgpu_kernel void @test_writelane_imm_i64(ptr addrspace(1) %out, i64 %src0) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load i64, ptr addrspace(1) %out
   %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 32, i64 %oldval) #0
   store i64 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_f64:
-; CHECK: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
-; CHECK-NEXT: v_writelane_b32 v{{[0-9]+}}, s{{[0-9]+}}, 32
 define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double %src0) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %oldval = load double, ptr addrspace(1) %out
   %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 32, double %oldval) #0
   store double %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_i32:
-; CHECK: v_mov_b32_e32 [[OLDVAL:v[0-9]+]], s{{[0-9]+}}
-; CIGFX9: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s6
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s6
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_oldval_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_oldval_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b32 s4, s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x8
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s6
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_oldval_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x8
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_oldval_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b32 s4, s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x8
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 %oldval)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_i64:
-; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], s{{[0-9]+}}
-; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], s{{[0-9]+}}
-; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x6
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x18
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_oldval_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x2
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    s_load_dword s8, s[4:5], 0x18
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s7, s8
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s6, s8
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_oldval_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x2
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x18
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s3, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, s0
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[6:7]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x6
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x18
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_oldval_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x2
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    s_load_dword s8, s[4:5], 0x18
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s6, s8
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s7, s8
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_oldval_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x2
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x18
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, s0
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s3, s0
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[6:7]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 %oldval)
   store i64 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_sreg_oldval_f64:
-; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], s{{[0-9]+}}
-; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], s{{[0-9]+}}
-; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval, ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x6
+; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX700-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x18
+; GFX802-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s6
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s2
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s3
+; GFX802-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_sreg_oldval_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x2
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    s_load_dword s8, s[4:5], 0x18
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s7, s8
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s6, s8
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_sreg_oldval_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x2
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b64 s[2:3], s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x18
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s3, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, s0
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[6:7]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x6
+; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x18
+; GFX802-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s2
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s3
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_sreg_oldval_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x2
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    s_load_dword s8, s[4:5], 0x18
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s6, s8
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s7, s8
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_sreg_oldval_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x2
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b64 s[2:3], s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x18
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, s0
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s3, s0
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[6:7]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double %oldval)
   store double %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_oldval_i32:
-; CHECK: v_mov_b32_e32 [[OLDVAL:v[0-9]+]], 42
-; CIGFX9: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDVAL]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_oldval_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_oldval_i32:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, 42
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_oldval_i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, 42
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s3
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_oldval_i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-SDAG-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_oldval_i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_oldval_i32:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, 42
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_oldval_i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, 42
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s3
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, s0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, s1
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v2
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_oldval_i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1010-GISEL-NEXT:    global_store_dword v1, v0, s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_oldval_i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s2, s3
+; GFX1100-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call i32 @llvm.amdgcn.writelane.i32(i32 %src0, i32 %src1, i32 42)
   store i32 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_oldval_i64:
-; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], 42
-; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], 0
-; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_oldval_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_oldval_i64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x4
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_oldval_i64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x10
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_oldval_i64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_oldval_i64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s7, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s6, s0
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_oldval_i64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_oldval_i64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_oldval_i64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_oldval_i64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 42
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s6, s0
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s7, s0
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call i64 @llvm.amdgcn.writelane.i64(i64 %src0, i32 %src1, i64 42)
   store i64 %writelane, ptr addrspace(1) %out, align 4
   ret void
 }
 
-; CHECK-LABEL: {{^}}test_writelane_imm_oldval_f64:
-; CHECK: v_mov_b32_e32 [[OLDSUB0:v[0-9]+]], 0
-; CHECK: v_mov_b32_e32 [[OLDSUB1:v[0-9]+]], 0x40450000
-; CIGFX9: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, m0
-; CIGFX9-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, m0
-; GFX10: v_writelane_b32 [[OLDSUB0]], s{{[0-9]+}}, s{{[0-9]+}}
-; GFX10-NEXT: v_writelane_b32 [[OLDSUB1]], s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_imm_oldval_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
+; GFX700-SDAG-LABEL: test_writelane_imm_oldval_f64:
+; GFX700-SDAG:       ; %bb.0:
+; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x4
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-SDAG-NEXT:    s_endpgm
+;
+; GFX802-SDAG-LABEL: test_writelane_imm_oldval_f64:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x10
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX802-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-SDAG-NEXT:    s_mov_b32 m0, s4
+; GFX802-SDAG-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-SDAG-NEXT:    s_endpgm
+;
+; GFX1010-SDAG-LABEL: test_writelane_imm_oldval_f64:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-SDAG-NEXT:    s_endpgm
+;
+; GFX1100-SDAG-LABEL: test_writelane_imm_oldval_f64:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-SDAG-NEXT:    s_load_b32 s0, s[0:1], 0x10
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v1, s7, s0
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v0, s6, s0
+; GFX1100-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-SDAG-NEXT:    s_nop 0
+; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-SDAG-NEXT:    s_endpgm
+;
+; GFX700-GISEL-LABEL: test_writelane_imm_oldval_f64:
+; GFX700-GISEL:       ; %bb.0:
+; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
+; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX700-GISEL-NEXT:    s_endpgm
+;
+; GFX802-GISEL-LABEL: test_writelane_imm_oldval_f64:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX802-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s6
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v3, s1
+; GFX802-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
+; GFX802-GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; GFX802-GISEL-NEXT:    s_endpgm
+;
+; GFX1010-GISEL-LABEL: test_writelane_imm_oldval_f64:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX1010-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX1010-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1010-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v0, s2, s6
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v1, s3, s6
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX1010-GISEL-NEXT:    s_endpgm
+;
+; GFX1100-GISEL-LABEL: test_writelane_imm_oldval_f64:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    s_load_b128 s[4:7], s[0:1], 0x0
+; GFX1100-GISEL-NEXT:    s_load_b32 s0, s[0:1], 0x10
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40450000
+; GFX1100-GISEL-NEXT:    v_mov_b32_e32 v2, 0
+; GFX1100-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v0, s6, s0
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v1, s7, s0
+; GFX1100-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
+; GFX1100-GISEL-NEXT:    s_nop 0
+; GFX1100-GISEL-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX1100-GISEL-NEXT:    s_endpgm
   %writelane = call double @llvm.amdgcn.writelane.f64(double %src0, i32 %src1, double 42.0)
   store double %writelane, ptr addrspace(1) %out, align 4
   ret void

>From 14fcf445e6757b5716e5a56f2d8d372d88075d68 Mon Sep 17 00:00:00 2001
From: Vikram <Vikram.Hegde at amd.com>
Date: Mon, 6 May 2024 05:40:45 +0000
Subject: [PATCH 11/11] refactor/improve GIsel lowering, added new tests

1. Addressed review comments
2. Improved GlobalISel lowering
3. Added tests for half, bfloat, float2, ptr, vector of ptr and int
4. Removed gfx700 checks from the writelane tests since they caused issues with f16 legalization. Is this removal acceptable, or are the gfx700 checks still required?
---
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |    3 +-
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  112 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   24 +-
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       |  235 +++
 .../CodeGen/AMDGPU/llvm.amdgcn.readlane.ll    |  271 +++
 .../CodeGen/AMDGPU/llvm.amdgcn.writelane.ll   | 1555 ++++++++++-------
 6 files changed, 1503 insertions(+), 697 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index a591fe76ff48ef..ebb6e4de36c01f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -519,4 +519,4 @@ def AMDGPUreadfirstlane : PatFrags<(ops node:$src),
 
 def AMDGPUwritelane : PatFrags<(ops node:$src0, node:$src1, node:$src2),
   [(int_amdgcn_writelane node:$src0, node:$src1, node:$src2),
-   (AMDGPUwritelane_impl node:$src0, node:$src1, node:$src2)]>;
\ No newline at end of file
+   (AMDGPUwritelane_impl node:$src0, node:$src1, node:$src2)]>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 8f0286164a7f11..faf70ca6fbdc94 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -5396,44 +5396,78 @@ bool AMDGPULegalizerInfo::legalizeLaneOp(LegalizerHelper &Helper,
   Register DstReg = MI.getOperand(0).getReg();
   Register Src0 = MI.getOperand(2).getReg();
 
+  auto createLaneOp = [&](Register &Src0, Register &Src1,
+                          Register &Src2) -> Register {
+    auto LaneOpDst = B.buildIntrinsic(IID, {S32}).addUse(Src0);
+    if (Src2.isValid())
+      return (LaneOpDst.addUse(Src1).addUse(Src2)).getReg(0);
+    if (Src1.isValid())
+      return (LaneOpDst.addUse(Src1)).getReg(0);
+    return LaneOpDst.getReg(0);
+  };
+
+  Register Src1, Src2, Src0Valid, Src2Valid;
+  if (IID == Intrinsic::amdgcn_readlane || IID == Intrinsic::amdgcn_writelane) {
+    Src1 = MI.getOperand(3).getReg();
+    if (IID == Intrinsic::amdgcn_writelane) {
+      Src2 = MI.getOperand(4).getReg();
+    }
+  }
+
   LLT Ty = MRI.getType(DstReg);
   unsigned Size = Ty.getSizeInBits();
 
-  if (Size == 32)
+  if (Size == 32) {
+    if (Ty.isScalar())
+      // Already legal
+      return true;
+
+    Src0Valid = B.buildBitcast(S32, Src0).getReg(0);
+    if (Src2.isValid())
+      Src2Valid = B.buildBitcast(S32, Src2).getReg(0);
+    Register LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid);
+    B.buildBitcast(DstReg, LaneOp);
+    MI.eraseFromParent();
     return true;
+  }
 
   if (Size < 32) {
-    auto Ext = B.buildAnyExt(LLT::scalar(32), Src0).getReg(0);
-    auto LaneOpDst =
-        B.buildIntrinsic(Intrinsic::amdgcn_readlane, {S32}).addUse(Ext);
-    if (IID == Intrinsic::amdgcn_readlane ||
-        IID == Intrinsic::amdgcn_writelane) {
-      auto Src1 = MI.getOperand(3).getReg();
-      LaneOpDst = LaneOpDst.addUse(Src1);
-      if (IID == Intrinsic::amdgcn_writelane) {
-        auto Src2 = MI.getOperand(4).getReg();
-        auto Ext2 = B.buildAnyExt(LLT::scalar(32), Src2).getReg(0);
-        LaneOpDst = LaneOpDst.addUse(Ext2);
-      }
+    Register Src0Cast = MRI.getType(Src0).isScalar()
+                            ? Src0
+                            : B.buildBitcast(LLT::scalar(Size), Src0).getReg(0);
+    Src0Valid = B.buildAnyExt(S32, Src0Cast).getReg(0);
+
+    if (Src2.isValid()) {
+      Register Src2Cast =
+          MRI.getType(Src2).isScalar()
+              ? Src2
+              : B.buildBitcast(LLT::scalar(Size), Src2).getReg(0);
+      Src2Valid = B.buildAnyExt(LLT::scalar(32), Src2Cast).getReg(0);
     }
-    B.buildTrunc(DstReg, LaneOpDst).getReg(0);
-  } else if ((Size % 32) == 0) {
-    SmallVector<Register, 2> Src0Parts, PartialRes;
-    unsigned NumParts = Size / 32;
-    auto WideReg = MRI.createGenericVirtualRegister(LLT::scalar(NumParts * 32));
-    for (unsigned i = 0; i < NumParts; ++i) {
-      Src0Parts.push_back(MRI.createGenericVirtualRegister(S32));
+    Register LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid);
+    if (Ty.isScalar())
+      B.buildTrunc(DstReg, LaneOp);
+    else {
+      auto Trunc = B.buildTrunc(LLT::scalar(Size), LaneOp);
+      B.buildBitcast(DstReg, Trunc);
     }
 
-    B.buildUnmerge(Src0Parts, Src0);
+    MI.eraseFromParent();
+    return true;
+  }
+
+  if ((Size % 32) == 0) {
+    SmallVector<Register, 2> PartialRes;
+    unsigned NumParts = Size / 32;
+    auto Src0Parts = B.buildUnmerge(S32, Src0);
 
     switch (IID) {
     case Intrinsic::amdgcn_readlane: {
-      auto Src1 = MI.getOperand(3).getReg();
+      Register Src1 = MI.getOperand(3).getReg();
       for (unsigned i = 0; i < NumParts; ++i)
         PartialRes.push_back(
             (B.buildIntrinsic(Intrinsic::amdgcn_readlane, {S32})
-                 .addUse(Src0Parts[i])
+                 .addUse(Src0Parts.getReg(i))
                  .addUse(Src1))
                 .getReg(0));
       break;
@@ -5443,35 +5477,37 @@ bool AMDGPULegalizerInfo::legalizeLaneOp(LegalizerHelper &Helper,
       for (unsigned i = 0; i < NumParts; ++i)
         PartialRes.push_back(
             (B.buildIntrinsic(Intrinsic::amdgcn_readfirstlane, {S32})
-                 .addUse(Src0Parts[i]))
+                 .addUse(Src0Parts.getReg(i)))
                 .getReg(0));
 
       break;
     }
     case Intrinsic::amdgcn_writelane: {
-      auto Src1 = MI.getOperand(3).getReg();
-      auto Src2 = MI.getOperand(4).getReg();
-      SmallVector<Register, 2> Src2Parts;
-      for (unsigned i = 0; i < NumParts; ++i) {
-        Src2Parts.push_back(MRI.createGenericVirtualRegister(S32));
-      }
-      B.buildUnmerge(Src2Parts, Src2);
+      Register Src1 = MI.getOperand(3).getReg();
+      Register Src2 = MI.getOperand(4).getReg();
+      auto Src2Parts = B.buildUnmerge(S32, Src2);
 
       for (unsigned i = 0; i < NumParts; ++i)
         PartialRes.push_back(
             (B.buildIntrinsic(Intrinsic::amdgcn_writelane, {S32})
-                 .addUse(Src0Parts[i])
+                 .addUse(Src0Parts.getReg(i))
                  .addUse(Src1)
-                 .addUse(Src2Parts[i]))
+                 .addUse(Src2Parts.getReg(i)))
                 .getReg(0));
     }
     }
-    B.buildMergeLikeInstr(DstReg, PartialRes);
-  } else
-    return false;
 
-  MI.eraseFromParent();
-  return true;
+    if (Ty.isPointerVector()) {
+      auto MergedVec = B.buildMergeLikeInstr(
+          LLT::vector(ElementCount::getFixed(NumParts), S32), PartialRes);
+      B.buildBitcast(DstReg, MergedVec);
+    } else
+      B.buildMergeLikeInstr(DstReg, PartialRes);
+    MI.eraseFromParent();
+    return true;
+  }
+
+  return false;
 }
 
 bool AMDGPULegalizerInfo::getImplicitArgPtr(Register DstReg,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 83f77c916834b3..478211c32f8459 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5984,7 +5984,7 @@ static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
 
 static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
                            SelectionDAG &DAG) {
-  auto VT = N->getValueType(0);
+  EVT VT = N->getValueType(0);
   unsigned ValSize = VT.getSizeInBits();
   unsigned IntrinsicID = N->getConstantOperandVal(0);
   SDValue Src0 = N->getOperand(1);
@@ -5993,11 +5993,9 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
 
   auto createLaneOp = [&](SDValue &Src0, SDValue &Src1, SDValue &Src2,
                           MVT VT) -> SDValue {
-    return (Src2.getNode()
-                ? DAG.getNode(AMDGPUISD::WRITELANE, SL, VT, {Src0, Src1, Src2})
-            : Src1.getNode()
-                ? DAG.getNode(AMDGPUISD::READLANE, SL, VT, {Src0, Src1})
-                : DAG.getNode(AMDGPUISD::READFIRSTLANE, SL, VT, {Src0}));
+    return (Src2 ? DAG.getNode(AMDGPUISD::WRITELANE, SL, VT, {Src0, Src1, Src2})
+            : Src1 ? DAG.getNode(AMDGPUISD::READLANE, SL, VT, {Src0, Src1})
+                   : DAG.getNode(AMDGPUISD::READFIRSTLANE, SL, VT, {Src0}));
   };
 
   SDValue Src1, Src2, Src0Valid, Src2Valid;
@@ -6015,19 +6013,19 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
     Src0Valid = DAG.getBitcast(IntVT, Src0);
     if (Src2.getNode())
       Src2Valid = DAG.getBitcast(IntVT, Src2);
-    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
+    SDValue LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
     return DAG.getBitcast(VT, LaneOp);
   }
 
   if (ValSize < 32) {
-    auto InitBitCast = DAG.getBitcast(IntVT, Src0);
+    SDValue InitBitCast = DAG.getBitcast(IntVT, Src0);
     Src0Valid = DAG.getAnyExtOrTrunc(InitBitCast, SL, MVT::i32);
     if (Src2.getNode()) {
-      auto Src2Cast = DAG.getBitcast(IntVT, Src2);
+      SDValue Src2Cast = DAG.getBitcast(IntVT, Src2);
       Src2Valid = DAG.getAnyExtOrTrunc(Src2Cast, SL, MVT::i32);
     }
-    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
-    auto Trunc = DAG.getAnyExtOrTrunc(LaneOp, SL, IntVT);
+    SDValue LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, MVT::i32);
+    SDValue Trunc = DAG.getAnyExtOrTrunc(LaneOp, SL, IntVT);
     return DAG.getBitcast(VT, Trunc);
   }
 
@@ -6038,8 +6036,8 @@ static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
     if (Src2.getNode())
       Src2Valid = DAG.getBitcast(VecVT, Src2);
 
-    auto LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, VecVT);
-    auto UnrolledLaneOp = DAG.UnrollVectorOp(LaneOp.getNode());
+    SDValue LaneOp = createLaneOp(Src0Valid, Src1, Src2Valid, VecVT);
+    SDValue UnrolledLaneOp = DAG.UnrollVectorOp(LaneOp.getNode());
     return DAG.getBitcast(VT, UnrolledLaneOp);
   }
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index 08447f2a395acc..8600480b1148c9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -389,5 +389,240 @@ define amdgpu_kernel void @test_readfirstlane_fi(ptr addrspace(1) %out) #1 {
   ret void
 }
 
+define void @test_readfirstlane_half(ptr addrspace(1) %out, half %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_half:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_half:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call half @llvm.amdgcn.readfirstlane.f16(half %src)
+  call void asm sideeffect "; use $0", "s"(half %x)
+  ret void
+}
+
+define void @test_readfirstlane_float(ptr addrspace(1) %out, float %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_float:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_float:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call float @llvm.amdgcn.readfirstlane.f32(float %src)
+  call void asm sideeffect "; use $0", "s"(float %x)
+  ret void
+}
+
+define void @test_readfirstlane_bfloat(ptr addrspace(1) %out, bfloat %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_bfloat:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_bfloat:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call bfloat @llvm.amdgcn.readfirstlane.bf16(bfloat %src)
+  call void asm sideeffect "; use $0", "s"(bfloat %x)
+  ret void
+}
+
+define void @test_readfirstlane_i16(ptr addrspace(1) %out, i16 %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_i16:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 0xffff
+; CHECK-SDAG-NEXT:    v_and_b32_e32 v0, s4, v0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use v0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_i16:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call i16 @llvm.amdgcn.readfirstlane.i16(i16 %src)
+  call void asm sideeffect "; use $0", "s"(i16 %x)
+  ret void
+}
+
+define void @test_readfirstlane_v2f16(ptr addrspace(1) %out, <2 x half> %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_v2f16:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_v2f16:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <2 x half> @llvm.amdgcn.readfirstlane.v2f16(<2 x half> %src)
+  call void asm sideeffect "; use $0", "s"(<2 x half> %x)
+  ret void
+}
+
+define void @test_readfirstlane_v2f32(ptr addrspace(1) %out, <2 x float> %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_v2f32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:5]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_v2f32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:5]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <2 x float> @llvm.amdgcn.readfirstlane.v2f32(<2 x float> %src)
+  call void asm sideeffect "; use $0", "s"(<2 x float> %x)
+  ret void
+}
+
+define void @test_readfirstlane_v7i32(ptr addrspace(1) %out, <7 x i32> %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_v7i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s10, v8
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s9, v7
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s8, v6
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s7, v5
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s6, v4
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:10]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_v7i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s8, v6
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s9, v7
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s10, v8
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:10]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <7 x i32> @llvm.amdgcn.readfirstlane.v7i32(<7 x i32> %src)
+  call void asm sideeffect "; use $0", "s"(<7 x i32> %x)
+  ret void
+}
+
+define void @test_readfirstlane_p0(ptr addrspace(1) %out, ptr %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_p0:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:5]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_p0:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:5]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call ptr @llvm.amdgcn.readfirstlane.p0(ptr %src)
+  call void asm sideeffect "; use $0", "s"(ptr %x)
+  ret void
+}
+
+define void @test_readfirstlane_v3p0(ptr addrspace(1) %out, <3 x ptr> %src) {
+; CHECK-SDAG-LABEL: test_readfirstlane_v3p0:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s9, v7
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s8, v6
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s7, v5
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s6, v4
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:9]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readfirstlane_v3p0:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s6, v4
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s7, v5
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s8, v6
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s9, v7
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:9]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <3 x ptr> @llvm.amdgcn.readfirstlane.v3p0(<3 x ptr> %src)
+  call void asm sideeffect "; use $0", "s"(<3 x ptr> %x)
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }
 attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 7d2454182e8ec2..47486d75630f3d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -657,6 +657,277 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %ou
   ret void
 }
 
+define void @test_readlane_half(ptr addrspace(1) %out, half %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_half:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_half:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call half @llvm.amdgcn.readlane.f16(half %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(half %x)
+  ret void
+}
+
+define void @test_readlane_float(ptr addrspace(1) %out, float %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_float:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_float:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call float @llvm.amdgcn.readlane.f32(float %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(float %x)
+  ret void
+}
+
+define void @test_readlane_bfloat(ptr addrspace(1) %out, bfloat %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_bfloat:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_bfloat:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call bfloat @llvm.amdgcn.readlane.bf16(bfloat %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(bfloat %x)
+  ret void
+}
+
+define void @test_readlane_i16(ptr addrspace(1) %out, i16 %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_i16:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, 0xffff
+; CHECK-SDAG-NEXT:    s_nop 2
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    v_and_b32_e32 v0, s4, v0
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use v0
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_i16:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call i16 @llvm.amdgcn.readlane.i16(i16 %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(i16 %x)
+  ret void
+}
+
+define void @test_readlane_v2f16(ptr addrspace(1) %out, <2 x half> %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_v2f16:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s4
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_v2f16:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s4
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <2 x half> @llvm.amdgcn.readlane.v2f16(<2 x half> %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(<2 x half> %x)
+  ret void
+}
+
+define void @test_readlane_v2f32(ptr addrspace(1) %out, <2 x float> %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_v2f32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v4
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s5, v3, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:5]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_v2f32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s5
+; CHECK-GISEL-NEXT:    v_readlane_b32 s5, v3, s5
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:5]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <2 x float> @llvm.amdgcn.readlane.v2f32(<2 x float> %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(<2 x float> %x)
+  ret void
+}
+
+define void @test_readlane_v7i32(ptr addrspace(1) %out, <7 x i32> %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_v7i32:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v9
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s10, v8, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s9, v7, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s8, v6, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s7, v5, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s6, v4, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s5, v3, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:10]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_v7i32:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s10, v9
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s5, v3, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s6, v4, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s7, v5, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s8, v6, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s9, v7, s10
+; CHECK-GISEL-NEXT:    v_readlane_b32 s10, v8, s10
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:10]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <7 x i32> @llvm.amdgcn.readlane.v7i32(<7 x i32> %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(<7 x i32> %x)
+  ret void
+}
+
+define void @test_readlane_p0(ptr addrspace(1) %out, ptr %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_p0:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v4
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s5, v3, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:5]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_p0:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s5
+; CHECK-GISEL-NEXT:    v_readlane_b32 s5, v3, s5
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:5]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call ptr @llvm.amdgcn.readlane.p0(ptr %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(ptr %x)
+  ret void
+}
+
+define void @test_readlane_v3p0(ptr addrspace(1) %out, <3 x ptr> %src, i32 %src1) {
+; CHECK-SDAG-LABEL: test_readlane_v3p0:
+; CHECK-SDAG:       ; %bb.0:
+; CHECK-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-SDAG-NEXT:    v_readfirstlane_b32 s4, v8
+; CHECK-SDAG-NEXT:    s_nop 3
+; CHECK-SDAG-NEXT:    v_readlane_b32 s9, v7, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s8, v6, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s7, v5, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s6, v4, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s5, v3, s4
+; CHECK-SDAG-NEXT:    v_readlane_b32 s4, v2, s4
+; CHECK-SDAG-NEXT:    ;;#ASMSTART
+; CHECK-SDAG-NEXT:    ; use s[4:9]
+; CHECK-SDAG-NEXT:    ;;#ASMEND
+; CHECK-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; CHECK-GISEL-LABEL: test_readlane_v3p0:
+; CHECK-GISEL:       ; %bb.0:
+; CHECK-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-GISEL-NEXT:    v_readfirstlane_b32 s9, v8
+; CHECK-GISEL-NEXT:    s_nop 3
+; CHECK-GISEL-NEXT:    v_readlane_b32 s4, v2, s9
+; CHECK-GISEL-NEXT:    v_readlane_b32 s5, v3, s9
+; CHECK-GISEL-NEXT:    v_readlane_b32 s6, v4, s9
+; CHECK-GISEL-NEXT:    v_readlane_b32 s7, v5, s9
+; CHECK-GISEL-NEXT:    v_readlane_b32 s8, v6, s9
+; CHECK-GISEL-NEXT:    v_readlane_b32 s9, v7, s9
+; CHECK-GISEL-NEXT:    ;;#ASMSTART
+; CHECK-GISEL-NEXT:    ; use s[4:9]
+; CHECK-GISEL-NEXT:    ;;#ASMEND
+; CHECK-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %x = call <3 x ptr> @llvm.amdgcn.readlane.v3p0(<3 x ptr> %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(<3 x ptr> %x)
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #2
 
 attributes #0 = { nounwind readnone convergent }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
index f5f8fa3907f975..8c25cf3977858e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.writelane.ll
@@ -1,10 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX700-SDAG %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx802 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX802-SDAG %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX1010-SDAG %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GFX1100-SDAG %s
 
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx700 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX700-GISEL %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx802 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX802-GISEL %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1010 -verify-machineinstrs -global-isel < %s | FileCheck -check-prefixes=GFX1010-GISEL %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-vopd=0 -global-isel < %s | FileCheck -check-prefixes=GFX1100-GISEL %s
@@ -14,20 +12,6 @@ declare i64 @llvm.amdgcn.writelane.i64(i64, i32, i64) #0
 declare double @llvm.amdgcn.writelane.f64(double, i32, double) #0
 
 define amdgpu_kernel void @test_writelane_sreg_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
-; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -68,20 +52,6 @@ define amdgpu_kernel void @test_writelane_sreg_i32(ptr addrspace(1) %out, i32 %s
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
-; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -128,23 +98,6 @@ define amdgpu_kernel void @test_writelane_sreg_i32(ptr addrspace(1) %out, i32 %s
 }
 
 define amdgpu_kernel void @test_writelane_sreg_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -196,23 +149,6 @@ define amdgpu_kernel void @test_writelane_sreg_i64(ptr addrspace(1) %out, i64 %s
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -270,23 +206,6 @@ define amdgpu_kernel void @test_writelane_sreg_i64(ptr addrspace(1) %out, i64 %s
 }
 
 define amdgpu_kernel void @test_writelane_sreg_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -338,23 +257,6 @@ define amdgpu_kernel void @test_writelane_sreg_f64(ptr addrspace(1) %out, double
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -412,20 +314,6 @@ define amdgpu_kernel void @test_writelane_sreg_f64(ptr addrspace(1) %out, double
 }
 
 define amdgpu_kernel void @test_writelane_imm_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_sreg_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, 32, s2
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_sreg_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -470,20 +358,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_i32(ptr addrspace(1) %out, i3
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_sreg_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 32, s2
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_sreg_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -534,22 +408,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_i32(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @test_writelane_imm_sreg_i64(ptr addrspace(1) %out, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_sreg_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x2
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, 0, s4
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 32, s4
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_sreg_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -600,22 +458,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_i64(ptr addrspace(1) %out, i3
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_sreg_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x2
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, 32, s4
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 0, s4
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_sreg_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -672,24 +514,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_i64(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @test_writelane_imm_sreg_f64(ptr addrspace(1) %out, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_sreg_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x2
-; GFX700-SDAG-NEXT:    s_mov_b32 s5, 0x40400000
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s5, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 0, s4
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_sreg_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -745,24 +569,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_f64(ptr addrspace(1) %out, i3
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_sreg_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s4, s[4:5], 0x2
-; GFX700-GISEL-NEXT:    s_mov_b32 s5, 0x40400000
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s4
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, 0, s4
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_sreg_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -823,29 +629,6 @@ define amdgpu_kernel void @test_writelane_imm_sreg_f64(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
-; GFX700-SDAG-LABEL: test_writelane_vreg_lane_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    flat_load_dword v0, v[0:1]
-; GFX700-SDAG-NEXT:    s_load_dword s2, s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s2
-; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    s_nop 2
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, 12, s2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_vreg_lane_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -905,30 +688,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, p
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_vreg_lane_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 4, v0
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    flat_load_dword v0, v[0:1]
-; GFX700-GISEL-NEXT:    s_load_dword s2, s[0:1], 0x0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
-; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    s_nop 2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 12, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_vreg_lane_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -999,31 +758,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i32(ptr addrspace(1) %out, p
 }
 
 define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
-; GFX700-SDAG-LABEL: test_writelane_vreg_lane_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    flat_load_dword v2, v[0:1]
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    s_nop 2
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, 0, s2
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 12, s2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_vreg_lane_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1089,31 +823,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, p
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_vreg_lane_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v4, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s0
-; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX700-GISEL-NEXT:    s_nop 3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 12, s2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, 0, s2
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_vreg_lane_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1188,33 +897,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_i64(ptr addrspace(1) %out, p
 }
 
 define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
-; GFX700-SDAG-LABEL: test_writelane_vreg_lane_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
-; GFX700-SDAG-NEXT:    s_mov_b32 s4, 0x40280000
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
-; GFX700-SDAG-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-SDAG-NEXT:    flat_load_dword v2, v[0:1]
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX700-SDAG-NEXT:    v_readfirstlane_b32 m0, v2
-; GFX700-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    s_nop 1
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s4, m0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, 0, s2
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_vreg_lane_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1284,33 +966,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, p
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_vreg_lane_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_lshlrev_b32_e32 v2, 4, v0
-; GFX700-GISEL-NEXT:    s_mov_b32 s4, 0x40280000
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
-; GFX700-GISEL-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX700-GISEL-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v4, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s0
-; GFX700-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s2
-; GFX700-GISEL-NEXT:    s_nop 2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, 0, s2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[3:4], v[1:2]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_vreg_lane_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1390,25 +1045,6 @@ define amdgpu_kernel void @test_writelane_vreg_lane_f64(ptr addrspace(1) %out, p
 }
 
 define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_m0_sreg_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-SDAG-NEXT:    ;;#ASMSTART
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, -1
-; GFX700-SDAG-NEXT:    ;;#ASMEND
-; GFX700-SDAG-NEXT:    s_mov_b32 s4, m0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_m0_sreg_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -1464,25 +1100,6 @@ define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_m0_sreg_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-GISEL-NEXT:    ;;#ASMSTART
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, -1
-; GFX700-GISEL-NEXT:    ;;#ASMEND
-; GFX700-GISEL-NEXT:    s_mov_b32 s4, m0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s2
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s4, m0
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_m0_sreg_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -1545,20 +1162,6 @@ define amdgpu_kernel void @test_writelane_m0_sreg_i32(ptr addrspace(1) %out, i32
 }
 
 define amdgpu_kernel void @test_writelane_imm_i32(ptr addrspace(1) %out, i32 %src0) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, 32
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -1603,20 +1206,6 @@ define amdgpu_kernel void @test_writelane_imm_i32(ptr addrspace(1) %out, i32 %sr
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s2, s[4:5], 0x2
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dword s3, s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s3
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, 32
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -1667,21 +1256,6 @@ define amdgpu_kernel void @test_writelane_imm_i32(ptr addrspace(1) %out, i32 %sr
 }
 
 define amdgpu_kernel void @test_writelane_imm_i64(ptr addrspace(1) %out, i64 %src0) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1727,21 +1301,6 @@ define amdgpu_kernel void @test_writelane_imm_i64(ptr addrspace(1) %out, i64 %sr
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1793,21 +1352,6 @@ define amdgpu_kernel void @test_writelane_imm_i64(ptr addrspace(1) %out, i64 %sr
 }
 
 define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double %src0) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, 32
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, 32
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1853,21 +1397,6 @@ define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s4
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s5
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, 32
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, 32
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1919,19 +1448,6 @@ define amdgpu_kernel void @test_writelane_imm_f64(ptr addrspace(1) %out, double
 }
 
 define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s6
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x0
@@ -1971,19 +1487,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s6
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x0
@@ -2028,22 +1531,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i32(i32 inreg %oldval, ptr
 }
 
 define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x6
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2092,22 +1579,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x6
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s3
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2161,22 +1632,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_i64(i64 inreg %oldval, ptr
 }
 
 define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval, ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_sreg_oldval_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s6, s[4:5], 0x6
-; GFX700-SDAG-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s6
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s2
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s3
-; GFX700-SDAG-NEXT:    v_writelane_b32 v3, s5, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s4, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_sreg_oldval_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2225,22 +1680,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval,
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_sreg_oldval_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x6
-; GFX700-GISEL-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s2
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s4, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s5, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s3
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_sreg_oldval_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2294,18 +1733,6 @@ define amdgpu_kernel void @test_writelane_sreg_oldval_f64(double inreg %oldval,
 }
 
 define amdgpu_kernel void @test_writelane_imm_oldval_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_oldval_i32:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, 42
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s3
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-SDAG-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_oldval_i32:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2340,18 +1767,6 @@ define amdgpu_kernel void @test_writelane_imm_oldval_i32(ptr addrspace(1) %out,
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_oldval_i32:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, 42
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s3
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, s0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v2, s2, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, s1
-; GFX700-GISEL-NEXT:    flat_store_dword v[0:1], v2
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_oldval_i32:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2391,21 +1806,6 @@ define amdgpu_kernel void @test_writelane_imm_oldval_i32(ptr addrspace(1) %out,
 }
 
 define amdgpu_kernel void @test_writelane_imm_oldval_i64(ptr addrspace(1) %out, i64 %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_oldval_i64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x4
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, 0
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, 42
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_oldval_i64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2451,21 +1851,6 @@ define amdgpu_kernel void @test_writelane_imm_oldval_i64(ptr addrspace(1) %out,
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_oldval_i64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, 42
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, 0
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_oldval_i64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
@@ -2516,21 +1901,6 @@ define amdgpu_kernel void @test_writelane_imm_oldval_i64(ptr addrspace(1) %out,
 }
 
 define amdgpu_kernel void @test_writelane_imm_oldval_f64(ptr addrspace(1) %out, double %src0, i32 %src1) #1 {
-; GFX700-SDAG-LABEL: test_writelane_imm_oldval_f64:
-; GFX700-SDAG:       ; %bb.0:
-; GFX700-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-SDAG-NEXT:    s_load_dword s4, s[4:5], 0x4
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v1, 0x40450000
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v0, 0
-; GFX700-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-SDAG-NEXT:    s_mov_b32 m0, s4
-; GFX700-SDAG-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-SDAG-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-SDAG-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-SDAG-NEXT:    s_endpgm
-;
 ; GFX802-SDAG-LABEL: test_writelane_imm_oldval_f64:
 ; GFX802-SDAG:       ; %bb.0:
 ; GFX802-SDAG-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -2576,21 +1946,6 @@ define amdgpu_kernel void @test_writelane_imm_oldval_f64(ptr addrspace(1) %out,
 ; GFX1100-SDAG-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX1100-SDAG-NEXT:    s_endpgm
 ;
-; GFX700-GISEL-LABEL: test_writelane_imm_oldval_f64:
-; GFX700-GISEL:       ; %bb.0:
-; GFX700-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x4
-; GFX700-GISEL-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v0, 0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40450000
-; GFX700-GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX700-GISEL-NEXT:    s_mov_b32 m0, s6
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v3, s1
-; GFX700-GISEL-NEXT:    v_writelane_b32 v0, s2, m0
-; GFX700-GISEL-NEXT:    v_writelane_b32 v1, s3, m0
-; GFX700-GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GFX700-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; GFX700-GISEL-NEXT:    s_endpgm
-;
 ; GFX802-GISEL-LABEL: test_writelane_imm_oldval_f64:
 ; GFX802-GISEL:       ; %bb.0:
 ; GFX802-GISEL-NEXT:    s_load_dword s6, s[4:5], 0x10
@@ -2640,6 +1995,916 @@ define amdgpu_kernel void @test_writelane_imm_oldval_f64(ptr addrspace(1) %out,
   ret void
 }
 
+define void @test_writelane_half(ptr addrspace(1) %out, half %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_half:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_short v[0:1], v4
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_half:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-SDAG-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_half:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-SDAG-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_half:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_short v[0:1], v4
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_half:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-GISEL-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_half:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-GISEL-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load half, ptr addrspace(1) %out
+  %writelane = call half @llvm.amdgcn.writelane.f16(half %src, i32 %src1, half %oldval)
+  store half %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_float(ptr addrspace(1) %out, float %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_float:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_dword v4, v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v4
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_float:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dword v4, v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-SDAG-NEXT:    global_store_dword v[0:1], v4, off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_float:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b32 v4, v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-SDAG-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_float:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_dword v4, v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v4
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_float:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dword v4, v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-GISEL-NEXT:    global_store_dword v[0:1], v4, off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_float:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b32 v4, v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-GISEL-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load float, ptr addrspace(1) %out
+  %writelane = call float @llvm.amdgcn.writelane.f32(float %src, i32 %src1, float %oldval)
+  store float %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_bfloat(ptr addrspace(1) %out, bfloat %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_bfloat:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_short v[0:1], v4
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_bfloat:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-SDAG-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_bfloat:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-SDAG-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_bfloat:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_short v[0:1], v4
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_bfloat:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-GISEL-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_bfloat:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-GISEL-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load bfloat, ptr addrspace(1) %out
+  %writelane = call bfloat @llvm.amdgcn.writelane.bf16(bfloat %src, i32 %src1, bfloat %oldval)
+  store bfloat %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_i16(ptr addrspace(1) %out, i16 %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_i16:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_short v[0:1], v4
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_i16:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-SDAG-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_i16:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-SDAG-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_i16:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_ushort v4, v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_short v[0:1], v4
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_i16:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_ushort v4, v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-GISEL-NEXT:    global_store_short v[0:1], v4, off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_i16:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_u16 v4, v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-GISEL-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load i16, ptr addrspace(1) %out
+  %writelane = call i16 @llvm.amdgcn.writelane.i16(i16 %src, i32 %src1, i16 %oldval)
+  store i16 %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_v2f16(ptr addrspace(1) %out, <2 x half> %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_v2f16:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_dword v4, v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 1
+; GFX802-SDAG-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-SDAG-NEXT:    flat_store_dword v[0:1], v4
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_v2f16:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dword v4, v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-SDAG-NEXT:    global_store_dword v[0:1], v4, off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_v2f16:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b32 v4, v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-SDAG-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_v2f16:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_dword v4, v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v4, s4, m0
+; GFX802-GISEL-NEXT:    flat_store_dword v[0:1], v4
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_v2f16:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dword v4, v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v4, s4, s5
+; GFX1010-GISEL-NEXT:    global_store_dword v[0:1], v4, off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_v2f16:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b32 v4, v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v4, s0, s1
+; GFX1100-GISEL-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load <2 x half>, ptr addrspace(1) %out
+  %writelane = call <2 x half> @llvm.amdgcn.writelane.v2f16(<2 x half> %src, i32 %src1, <2 x half> %oldval)
+  store <2 x half> %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_v2f32(ptr addrspace(1) %out, <2 x float> %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_v2f32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_dwordx2 v[5:6], v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v4
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v6, s4, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v5, s5, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[5:6]
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_v2f32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dwordx2 v[5:6], v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s6, v2
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v6, s4, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v5, s6, s5
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v[0:1], v[5:6], off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_v2f32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b64 v[5:6], v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v3
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v6, s0, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v5, s2, s1
+; GFX1100-SDAG-NEXT:    global_store_b64 v[0:1], v[5:6], off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_v2f32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_dwordx2 v[5:6], v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v5, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v6, s6, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[5:6]
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_v2f32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dwordx2 v[5:6], v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v5, s4, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v6, s6, s5
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[5:6], off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_v2f32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b64 v[5:6], v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v5, s0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v6, s2, s1
+; GFX1100-GISEL-NEXT:    global_store_b64 v[0:1], v[5:6], off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load <2 x float>, ptr addrspace(1) %out
+  %writelane = call <2 x float> @llvm.amdgcn.writelane.v2f32(<2 x float> %src, i32 %src1, <2 x float> %oldval)
+  store <2 x float> %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_v7i32(ptr addrspace(1) %out, <7 x i32> %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_v7i32:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v17, vcc, 16, v0
+; GFX802-SDAG-NEXT:    flat_load_dwordx4 v[10:13], v[0:1]
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v18, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    flat_load_dwordx3 v[14:16], v[17:18]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v9
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s8, v4
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s9, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v8
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s5, v7
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s6, v6
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX802-SDAG-NEXT:    v_writelane_b32 v13, s7, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v12, s8, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v11, s9, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v10, s10, m0
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    v_writelane_b32 v16, s4, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v15, s5, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v14, s6, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx4 v[0:1], v[10:13]
+; GFX802-SDAG-NEXT:    flat_store_dwordx3 v[17:18], v[14:16]
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_v7i32:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    global_load_dwordx3 v[14:16], v[0:1], off offset:16
+; GFX1010-SDAG-NEXT:    global_load_dwordx4 v[10:13], v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v9
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s8, v5
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s9, v4
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s10, v3
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s11, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v8
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s6, v7
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s7, v6
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v16, s4, s5
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v13, s8, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v12, s9, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v11, s10, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v10, s11, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v15, s6, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v14, s7, s5
+; GFX1010-SDAG-NEXT:    global_store_dwordx4 v[0:1], v[10:13], off
+; GFX1010-SDAG-NEXT:    global_store_dwordx3 v[0:1], v[14:16], off offset:16
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_v7i32:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    global_load_b96 v[14:16], v[0:1], off offset:16
+; GFX1100-SDAG-NEXT:    global_load_b128 v[10:13], v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v9
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s4, v5
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s7, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v8
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s2, v7
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s3, v6
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v16, s0, s1
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v13, s4, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v12, s5, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v11, s6, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v10, s7, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v15, s2, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v14, s3, s1
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    global_store_b128 v[0:1], v[10:13], off
+; GFX1100-SDAG-NEXT:    global_store_b96 v[0:1], v[14:16], off offset:16
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_v7i32:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v18, vcc, 16, v0
+; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[10:13], v[0:1]
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v19, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[14:17], v[18:19]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v9
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s7, v4
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s8, v5
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s9, v6
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s10, v7
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s11, v8
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v10, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v11, s6, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v12, s7, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v13, s8, m0
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v14, s9, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v15, s10, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v16, s11, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx4 v[0:1], v[10:13]
+; GFX802-GISEL-NEXT:    flat_store_dwordx3 v[18:19], v[14:16]
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_v7i32:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    global_load_dwordx4 v[10:13], v[0:1], off
+; GFX1010-GISEL-NEXT:    global_load_dwordx4 v[14:17], v[0:1], off offset:16
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v9
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s7, v4
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s8, v5
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s9, v6
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s10, v7
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s11, v8
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v10, s4, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v11, s6, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v12, s7, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v13, s8, s5
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v14, s9, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v15, s10, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v16, s11, s5
+; GFX1010-GISEL-NEXT:    global_store_dwordx4 v[0:1], v[10:13], off
+; GFX1010-GISEL-NEXT:    global_store_dwordx3 v[0:1], v[14:16], off offset:16
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_v7i32:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    global_load_b128 v[10:13], v[0:1], off
+; GFX1100-GISEL-NEXT:    global_load_b128 v[14:17], v[0:1], off offset:16
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v9
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s3, v4
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s4, v5
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s5, v6
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s6, v7
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s7, v8
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v10, s0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v11, s2, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v12, s3, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v13, s4, s1
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v14, s5, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v15, s6, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v16, s7, s1
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    global_store_b128 v[0:1], v[10:13], off
+; GFX1100-GISEL-NEXT:    global_store_b96 v[0:1], v[14:16], off offset:16
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load <7 x i32>, ptr addrspace(1) %out
+  %writelane = call <7 x i32> @llvm.amdgcn.writelane.v7i32(<7 x i32> %src, i32 %src1, <7 x i32> %oldval)
+  store <7 x i32> %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_p0(ptr addrspace(1) %out, ptr %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_p0:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    flat_load_dwordx2 v[5:6], v[0:1]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v4
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_nop 0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v6, s4, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v5, s5, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[5:6]
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_p0:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    global_load_dwordx2 v[5:6], v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v3
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s6, v2
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v6, s4, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v5, s6, s5
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v[0:1], v[5:6], off
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_p0:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    global_load_b64 v[5:6], v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v3
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v6, s0, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v5, s2, s1
+; GFX1100-SDAG-NEXT:    global_store_b64 v[0:1], v[5:6], off
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_p0:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    flat_load_dwordx2 v[5:6], v[0:1]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v5, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v6, s6, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[0:1], v[5:6]
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_p0:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    global_load_dwordx2 v[5:6], v[0:1], off
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v4
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v5, s4, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v6, s6, s5
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[5:6], off
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_p0:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    global_load_b64 v[5:6], v[0:1], off
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v5, s0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v6, s2, s1
+; GFX1100-GISEL-NEXT:    global_store_b64 v[0:1], v[5:6], off
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load ptr, ptr addrspace(1) %out
+  %writelane = call ptr @llvm.amdgcn.writelane.p0(ptr %src, i32 %src1, ptr %oldval)
+  store ptr %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define void @test_writelane_v3p0(ptr addrspace(1) %out, <3 x ptr> %src, i32 %src1) {
+; GFX802-SDAG-LABEL: test_writelane_v3p0:
+; GFX802-SDAG:       ; %bb.0:
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-SDAG-NEXT:    v_add_u32_e32 v13, vcc, 16, v0
+; GFX802-SDAG-NEXT:    flat_load_dwordx4 v[9:12], v[0:1]
+; GFX802-SDAG-NEXT:    v_addc_u32_e32 v14, vcc, 0, v1, vcc
+; GFX802-SDAG-NEXT:    flat_load_dwordx2 v[15:16], v[13:14]
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 m0, v8
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s6, v5
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s7, v4
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s8, v3
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s9, v2
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s4, v7
+; GFX802-SDAG-NEXT:    v_readfirstlane_b32 s5, v6
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX802-SDAG-NEXT:    v_writelane_b32 v12, s6, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v11, s7, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v10, s8, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v9, s9, m0
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    v_writelane_b32 v16, s4, m0
+; GFX802-SDAG-NEXT:    v_writelane_b32 v15, s5, m0
+; GFX802-SDAG-NEXT:    flat_store_dwordx4 v[0:1], v[9:12]
+; GFX802-SDAG-NEXT:    flat_store_dwordx2 v[13:14], v[15:16]
+; GFX802-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-SDAG-LABEL: test_writelane_v3p0:
+; GFX1010-SDAG:       ; %bb.0:
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-SDAG-NEXT:    s_clause 0x1
+; GFX1010-SDAG-NEXT:    global_load_dwordx2 v[13:14], v[0:1], off offset:16
+; GFX1010-SDAG-NEXT:    global_load_dwordx4 v[9:12], v[0:1], off
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s5, v8
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s7, v5
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s8, v4
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s9, v3
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s10, v2
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s4, v7
+; GFX1010-SDAG-NEXT:    v_readfirstlane_b32 s6, v6
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v14, s4, s5
+; GFX1010-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v12, s7, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v11, s8, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v10, s9, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v9, s10, s5
+; GFX1010-SDAG-NEXT:    v_writelane_b32 v13, s6, s5
+; GFX1010-SDAG-NEXT:    global_store_dwordx4 v[0:1], v[9:12], off
+; GFX1010-SDAG-NEXT:    global_store_dwordx2 v[0:1], v[13:14], off offset:16
+; GFX1010-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-SDAG-LABEL: test_writelane_v3p0:
+; GFX1100-SDAG:       ; %bb.0:
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    global_load_b64 v[13:14], v[0:1], off offset:16
+; GFX1100-SDAG-NEXT:    global_load_b128 v[9:12], v[0:1], off
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s1, v8
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s3, v5
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s4, v4
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s6, v2
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s0, v7
+; GFX1100-SDAG-NEXT:    v_readfirstlane_b32 s2, v6
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(1)
+; GFX1100-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v14, s0, s1
+; GFX1100-SDAG-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v12, s3, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v11, s4, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v10, s5, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v9, s6, s1
+; GFX1100-SDAG-NEXT:    v_writelane_b32 v13, s2, s1
+; GFX1100-SDAG-NEXT:    s_clause 0x1
+; GFX1100-SDAG-NEXT:    global_store_b128 v[0:1], v[9:12], off
+; GFX1100-SDAG-NEXT:    global_store_b64 v[0:1], v[13:14], off offset:16
+; GFX1100-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX802-GISEL-LABEL: test_writelane_v3p0:
+; GFX802-GISEL:       ; %bb.0:
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX802-GISEL-NEXT:    v_add_u32_e32 v17, vcc, 16, v0
+; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[9:12], v[0:1]
+; GFX802-GISEL-NEXT:    v_addc_u32_e32 v18, vcc, 0, v1, vcc
+; GFX802-GISEL-NEXT:    flat_load_dwordx4 v[13:16], v[17:18]
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s5, v8
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s7, v4
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s8, v5
+; GFX802-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s9, v6
+; GFX802-GISEL-NEXT:    v_readfirstlane_b32 s10, v7
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v9, s4, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v10, s6, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v11, s7, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v12, s8, m0
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    v_writelane_b32 v13, s9, m0
+; GFX802-GISEL-NEXT:    v_writelane_b32 v14, s10, m0
+; GFX802-GISEL-NEXT:    flat_store_dwordx4 v[0:1], v[9:12]
+; GFX802-GISEL-NEXT:    flat_store_dwordx2 v[17:18], v[13:14]
+; GFX802-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX802-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1010-GISEL-LABEL: test_writelane_v3p0:
+; GFX1010-GISEL:       ; %bb.0:
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-GISEL-NEXT:    s_clause 0x1
+; GFX1010-GISEL-NEXT:    global_load_dwordx4 v[9:12], v[0:1], off
+; GFX1010-GISEL-NEXT:    global_load_dwordx4 v[13:16], v[0:1], off offset:16
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s4, v2
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s5, v8
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s6, v3
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s7, v4
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s8, v5
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s9, v6
+; GFX1010-GISEL-NEXT:    v_readfirstlane_b32 s10, v7
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v9, s4, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v10, s6, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v11, s7, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v12, s8, s5
+; GFX1010-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v13, s9, s5
+; GFX1010-GISEL-NEXT:    v_writelane_b32 v14, s10, s5
+; GFX1010-GISEL-NEXT:    global_store_dwordx4 v[0:1], v[9:12], off
+; GFX1010-GISEL-NEXT:    global_store_dwordx2 v[0:1], v[13:14], off offset:16
+; GFX1010-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1100-GISEL-LABEL: test_writelane_v3p0:
+; GFX1100-GISEL:       ; %bb.0:
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    global_load_b128 v[9:12], v[0:1], off
+; GFX1100-GISEL-NEXT:    global_load_b128 v[13:16], v[0:1], off offset:16
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s1, v8
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s2, v3
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s3, v4
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s4, v5
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s5, v6
+; GFX1100-GISEL-NEXT:    v_readfirstlane_b32 s6, v7
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(1)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v9, s0, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v10, s2, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v11, s3, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v12, s4, s1
+; GFX1100-GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v13, s5, s1
+; GFX1100-GISEL-NEXT:    v_writelane_b32 v14, s6, s1
+; GFX1100-GISEL-NEXT:    s_clause 0x1
+; GFX1100-GISEL-NEXT:    global_store_b128 v[0:1], v[9:12], off
+; GFX1100-GISEL-NEXT:    global_store_b64 v[0:1], v[13:14], off offset:16
+; GFX1100-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %oldval = load <3 x ptr>, ptr addrspace(1) %out
+  %writelane = call <3 x ptr> @llvm.amdgcn.writelane.v3p0(<3 x ptr> %src, i32 %src1, <3 x ptr> %oldval)
+  store <3 x ptr> %writelane, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #2
 
 attributes #0 = { nounwind readnone convergent }



More information about the cfe-commits mailing list