[llvm] [AMDGPU] Hoist permlane64/readlane/readfirstlane through unary/binary operands (PR #129037)

Pierre van Houtryve via llvm-commits llvm-commits at lists.llvm.org
Thu May 1 03:01:50 PDT 2025


https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/129037

>From 26ca2a15860567c40bfad1e7dd05b2a4eb0207c1 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 27 Feb 2025 12:11:44 +0100
Subject: [PATCH 1/8] [AMDGPU] Hoist readlane/readfirstlane through unary/binary
 operands

When a read(first)lane is used on the result of a binary operator and the intrinsic is the only user of that operator, we can hoist the read(first)lane onto the non-uniform operand, provided the other operand is uniform.

Unfortunately, InstCombine doesn't let us access UniformityAnalysis, so we can't truly check uniformity; we have to make do with a basic check that only accepts constants or trivially uniform intrinsic calls.

We can also do the same for simple unary operations.
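
For illustration, the intended rewrite on a readfirstlane of an add with a
constant operand looks like this (a minimal sketch mirroring the
hoist_add_i32 test below; value names are illustrative):

  ; before
  %op  = add i32 %x, 16777215
  %res = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %op)

  ; after: the intrinsic reads the non-uniform operand, and the add now
  ; operates on an already-scalar value
  %x.rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %x)
  %res   = add i32 %x.rfl, 16777215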
---
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |  59 +++
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.h |   3 +
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       | 461 ++++++++++++++++++
 .../AMDGPU/llvm.amdgcn.readlane.ll            | 143 ++++++
 4 files changed, 666 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
 create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 7ec2ee06b811a..5a88fc07dbac3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -481,6 +481,59 @@ bool GCNTTIImpl::simplifyDemandedLaneMaskArg(InstCombiner &IC,
   return false;
 }
 
+Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
+                                                     IntrinsicInst &II) const {
+  Instruction *Op = dyn_cast<Instruction>(II.getOperand(0));
+
+  // Only do this if both instructions are in the same block
+  // (so the exec mask won't change) and the readlane is the only user of its
+  // operand.
+  if (!Op || !Op->hasOneUser() || Op->getParent() != II.getParent())
+    return nullptr;
+
+  const bool IsReadLane = (II.getIntrinsicID() == Intrinsic::amdgcn_readlane);
+
+  // If this is a readlane, check that the second operand is a constant, or is
+  // defined before Op so we know it's safe to move this intrinsic higher.
+  Value *LaneID = nullptr;
+  if (IsReadLane) {
+    LaneID = II.getOperand(1);
+    if (!isa<Constant>(LaneID) && !(isa<Instruction>(LaneID) &&
+                                    cast<Instruction>(LaneID)->comesBefore(Op)))
+      return nullptr;
+  }
+
+  const auto DoIt = [&](unsigned OpIdx) -> Instruction * {
+    SmallVector<Value *, 2> Ops{Op->getOperand(OpIdx)};
+    if (IsReadLane)
+      Ops.push_back(LaneID);
+
+    Instruction *NewII =
+        IC.Builder.CreateIntrinsic(II.getType(), II.getIntrinsicID(), Ops);
+
+    Instruction &NewOp = *Op->clone();
+    NewOp.setOperand(OpIdx, NewII);
+    return &NewOp;
+  };
+
+  // TODO: Are any operations more expensive on the SALU than VALU, and thus
+  //       need to be excluded here?
+
+  if (isa<UnaryOperator>(Op))
+    return DoIt(0);
+
+  if (isa<BinaryOperator>(Op)) {
+    // FIXME: If we had access to UniformityInfo here we could just check
+    // if the operand is uniform.
+    if (isTriviallyUniform(Op->getOperandUse(0)))
+      return DoIt(1);
+    if (isTriviallyUniform(Op->getOperandUse(1)))
+      return DoIt(0);
+  }
+
+  return nullptr;
+}
+
 std::optional<Instruction *>
 GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
   Intrinsic::ID IID = II.getIntrinsicID();
@@ -1214,6 +1267,12 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
       }
     }
 
+    // If the readfirstlane reads the result of an operation that exists
+    // both in the SALU and VALU, we may be able to hoist it higher in order
+    // to scalarize the expression.
+    if (Instruction *Res = hoistReadLaneThroughOperand(IC, II))
+      return Res;
+
     return std::nullopt;
   }
   case Intrinsic::amdgcn_writelane: {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index f6f7bd4bfcf5b..4d2ba84b40472 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -236,6 +236,9 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
                                              const APInt &DemandedElts,
                                              APInt &UndefElts) const;
 
+  Instruction *hoistReadLaneThroughOperand(InstCombiner &IC,
+                                           IntrinsicInst &II) const;
+
   std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
       InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
       APInt &UndefElts2, APInt &UndefElts3,
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
new file mode 100644
index 0000000000000..9f27fda591382
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -0,0 +1,461 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 -passes=instcombine -S < %s | FileCheck %s
+
+; test unary
+
+define float @hoist_fneg_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fneg_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[TMP0]]
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fneg float %arg
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define double @hoist_fneg_f64(double %arg) {
+; CHECK-LABEL: define double @hoist_fneg_f64(
+; CHECK-SAME: double [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[TMP0]]
+; CHECK-NEXT:    ret double [[RFL]]
+;
+bb:
+  %val = fneg double %arg
+  %rfl = call double @llvm.amdgcn.readfirstlane.f64(double %val)
+  ret double %rfl
+}
+
+; test binary i32
+
+define i32 @hoist_add_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_add_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = add i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_fadd_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fadd_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fadd float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i32 @hoist_sub_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_sub_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], -16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = sub i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_fsub_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fsub_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[TMP0]], -1.280000e+02
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fsub float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i32 @hoist_mul_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_mul_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = mul i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = mul i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_fmul_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fmul_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fmul float [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fmul float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i32 @hoist_udiv_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_udiv_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = udiv i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = udiv i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_sdiv_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_sdiv_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = sdiv i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = sdiv i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_fdiv_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fdiv_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fmul float [[TMP0]], 7.812500e-03
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fdiv float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i32 @hoist_urem_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_urem_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = urem i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = urem i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_srem_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_srem_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = srem i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = srem i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_frem_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_frem_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = frem float [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = frem float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i32 @hoist_shl_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_shl_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = shl i32 [[TMP0]], 4
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = shl i32 %arg, 4
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_lshr_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_lshr_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = lshr i32 [[TMP0]], 4
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = lshr i32 %arg, 4
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_ashr_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_ashr_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = ashr i32 [[TMP0]], 4
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = ashr i32 %arg, 4
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+
+define i32 @hoist_and_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_and_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = and i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = and i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_or_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_or_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = or i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = or i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_xor_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_xor_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = xor i32 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = xor i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+; test binary i64
+
+define i64 @hoist_and_i64(i64 %arg) {
+; CHECK-LABEL: define i64 @hoist_and_i64(
+; CHECK-SAME: i64 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.amdgcn.readfirstlane.i64(i64 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = and i64 [[TMP0]], 16777215
+; CHECK-NEXT:    ret i64 [[RFL]]
+;
+bb:
+  %val = and i64 %arg, 16777215
+  %rfl = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %val)
+  ret i64 %rfl
+}
+
+define double @hoist_fadd_f64(double %arg) {
+; CHECK-LABEL: define double @hoist_fadd_f64(
+; CHECK-SAME: double [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd double [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    ret double [[RFL]]
+;
+bb:
+  %val = fadd double %arg, 128.0
+  %rfl = call double @llvm.amdgcn.readfirstlane.f64(double %val)
+  ret double %rfl
+}
+
+; test constant on LHS
+
+define i32 @hoist_sub_i32_lhs(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_sub_i32_lhs(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = sub i32 16777215, [[TMP0]]
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = sub i32 16777215, %arg
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_fsub_f32_lhs(float %arg) {
+; CHECK-LABEL: define float @hoist_fsub_f32_lhs(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fsub float 1.280000e+02, [[TMP0]]
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fsub float 128.0, %arg
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+; test other operand is trivially uniform
+
+define i32 @hoist_add_i32_trivially_uniform_rhs(i32 %arg, i32 %v.other) {
+; CHECK-LABEL: define i32 @hoist_add_i32_trivially_uniform_rhs(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[V_OTHER:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[OTHER:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[V_OTHER]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], [[OTHER]]
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %other = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %v.other)
+  %val = add i32 %arg, %other
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_add_i32_trivially_uniform_lhs(i32 %arg, i32 %v.other) {
+; CHECK-LABEL: define i32 @hoist_add_i32_trivially_uniform_lhs(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[V_OTHER:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[OTHER:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[V_OTHER]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = sub i32 [[OTHER]], [[TMP0]]
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %other = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %v.other)
+  %val = sub i32 %other, %arg
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+; test multiple iterations
+
+define i32 @hoist_multiple_times(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_multiple_times(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 16777215, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP2]], 4242
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP3]], 6
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val.0 = shl i32 %arg, 2
+  %val.1 = sub i32 16777215, %val.0
+  %val.2 = xor i32 %val.1, 4242
+  %val.3 = add i32 %val.2, 6
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val.3)
+  ret i32 %rfl
+}
+
+; test cases where hoisting isn't possible
+
+define i32 @cross_block_hoisting(i1 %cond, i32 %arg) {
+; CHECK-LABEL: define i32 @cross_block_hoisting(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], 16777215
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[VAL]])
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[VAL]], %[[BB]] ]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+bb:
+  %val = add i32 %arg, 16777215
+  br i1 %cond, label %then, label %end
+
+then:
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  br label %end
+
+end:
+  %res = phi i32 [%rfl, %then], [%val, %bb]
+  ret i32 %res
+}
+
+define i32 @operand_is_instr(i32 %arg, ptr %src) {
+; CHECK-LABEL: define i32 @operand_is_instr(
+; CHECK-SAME: i32 [[ARG:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[OTHER:%.*]] = load i32, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], [[OTHER]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %other = load i32, ptr %src
+  %val = add i32 %arg, %other
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @operand_is_arg(i32 %arg, i32 %other) {
+; CHECK-LABEL: define i32 @operand_is_arg(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[OTHER:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], [[OTHER]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = add i32 %arg, %other
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
new file mode 100644
index 0000000000000..6ac65f5c70337
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 -passes=instcombine -S < %s | FileCheck %s
+
+; The readfirstlane version of this test covers all the interesting cases of the
+; shared logic. This testcase focuses on readlane-specific pitfalls.
+
+; test unary
+
+define float @hoist_fneg_f32(float %arg, i32 %lane) {
+; CHECK-LABEL: define float @hoist_fneg_f32(
+; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fneg float [[ARG]]
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fneg float %arg
+  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rfl
+}
+
+define double @hoist_fneg_f64(double %arg, i32 %lane) {
+; CHECK-LABEL: define double @hoist_fneg_f64(
+; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fneg double [[ARG]]
+; CHECK-NEXT:    [[RFL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret double [[RFL]]
+;
+bb:
+  %val = fneg double %arg
+  %rfl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
+  ret double %rfl
+}
+
+; test binary i32
+
+define i32 @hoist_add_i32(i32 %arg, i32 %lane) {
+; CHECK-LABEL: define i32 @hoist_add_i32(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], 16777215
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = add i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  ret i32 %rfl
+}
+
+define float @hoist_fadd_f32(float %arg, i32 %lane) {
+; CHECK-LABEL: define float @hoist_fadd_f32(
+; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fadd float [[ARG]], 1.280000e+02
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fadd float %arg, 128.0
+  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rfl
+}
+
+; test binary i64
+
+define i64 @hoist_and_i64(i64 %arg, i32 %lane) {
+; CHECK-LABEL: define i64 @hoist_and_i64(
+; CHECK-SAME: i64 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = and i64 [[ARG]], 16777215
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret i64 [[RFL]]
+;
+bb:
+  %val = and i64 %arg, 16777215
+  %rfl = call i64 @llvm.amdgcn.readlane.i64(i64 %val, i32 %lane)
+  ret i64 %rfl
+}
+
+define double @hoist_fadd_f64(double %arg, i32 %lane) {
+; CHECK-LABEL: define double @hoist_fadd_f64(
+; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fadd double [[ARG]], 1.280000e+02
+; CHECK-NEXT:    [[RFL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret double [[RFL]]
+;
+bb:
+  %val = fadd double %arg, 128.0
+  %rfl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
+  ret double %rfl
+}
+
+; test constant on LHS
+
+define i32 @hoist_sub_i32_lhs(i32 %arg, i32 %lane) {
+; CHECK-LABEL: define i32 @hoist_sub_i32_lhs(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = sub i32 16777215, [[ARG]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = sub i32 16777215, %arg
+  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  ret i32 %rfl
+}
+
+define float @hoist_fsub_f32_lhs(float %arg, i32 %lane) {
+; CHECK-LABEL: define float @hoist_fsub_f32_lhs(
+; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fsub float 1.280000e+02, [[ARG]]
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fsub float 128.0, %arg
+  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rfl
+}
+
+; Check cases where we can't move the readlane higher
+
+define float @cannot_move_readlane(float %arg, i32 %base) {
+; CHECK-LABEL: define float @cannot_move_readlane(
+; CHECK-SAME: float [[ARG:%.*]], i32 [[BASE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fsub float 1.280000e+02, [[ARG]]
+; CHECK-NEXT:    [[LANE:%.*]] = add i32 [[BASE]], 2
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fsub float 128.0, %arg
+  %lane = add i32 %base, 2
+  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rfl
+}

>From a756fb01425e0406ff594a986009167ac1b0c123 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 27 Feb 2025 12:42:37 +0100
Subject: [PATCH 2/8] Fix comesBefore check

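Use a dominance query instead of comesBefore to decide whether the lane-id
operand is available at the point where the intrinsic is recreated;
comesBefore is only meaningful within a single basic block. Also copy the
operand bundles onto the new call so convergence tokens are preserved.

A minimal sketch of a case the dominance check accepts (adapted from the
readlane_lane_op_in_other_block test added below; value names are
illustrative):

  define i32 @example(i1 %cond, i32 %arg, i32 %base) {
  bb:
    %lane = add i32 %base, 2
    br i1 %cond, label %then, label %end
  then:
    ; %lane dominates the add below, so the readlane can still be hoisted
    ; onto %arg even though %lane is defined in another block
    %val = add i32 %arg, 16777215
    %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
    br label %end
  end:
    %res = phi i32 [ %rl, %then ], [ %lane, %bb ]
    ret i32 %res
  }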
---
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     | 15 +++-
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       | 30 +++++++
 .../AMDGPU/llvm.amdgcn.readlane.ll            | 90 +++++++++++++++----
 3 files changed, 115 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 5a88fc07dbac3..170b561524549 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -18,6 +18,7 @@
 #include "AMDGPUTargetTransformInfo.h"
 #include "GCNSubtarget.h"
 #include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/Transforms/InstCombine/InstCombiner.h"
 #include <optional>
@@ -498,8 +499,9 @@ Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
   Value *LaneID = nullptr;
   if (IsReadLane) {
     LaneID = II.getOperand(1);
-    if (!isa<Constant>(LaneID) && !(isa<Instruction>(LaneID) &&
-                                    cast<Instruction>(LaneID)->comesBefore(Op)))
+    // Check that LaneID is available at Op; otherwise we can't move the
+    // readlane higher.
+    if (!IC.getDominatorTree().dominates(LaneID, Op))
       return nullptr;
   }
 
@@ -508,8 +510,13 @@ Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
     if (IsReadLane)
       Ops.push_back(LaneID);
 
-    Instruction *NewII =
-        IC.Builder.CreateIntrinsic(II.getType(), II.getIntrinsicID(), Ops);
+    // Make sure convergence tokens are preserved.
+    // TODO: CreateIntrinsic should allow directly copying bundles
+    SmallVector<OperandBundleDef, 2> OpBundles;
+    II.getOperandBundlesAsDefs(OpBundles);
+
+    CallInst *NewII =
+        IC.Builder.CreateCall(II.getCalledFunction(), Ops, OpBundles);
 
     Instruction &NewOp = *Op->clone();
     NewOp.setOperand(OpIdx, NewII);
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
index 9f27fda591382..bc750bea4beea 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -459,3 +459,33 @@ bb:
   %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
   ret i32 %rfl
 }
+
+; test that convergence tokens are preserved
+
+define i32 @hoist_preserves_convergence_token(i1 %cond, i32 %arg) convergent {
+; CHECK-LABEL: define i32 @hoist_preserves_convergence_token(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[ARG:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[ENTRY:%.*]] = call token @llvm.experimental.convergence.entry()
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]]) [ "convergencectrl"(token [[ENTRY]]) ]
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[ARG]], %[[BB]] ]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+bb:
+  %entry = call token @llvm.experimental.convergence.entry()
+  br i1 %cond, label %then, label %end
+
+then:
+  %val = add i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val) [ "convergencectrl"(token %entry)]
+  br label %end
+
+end:
+  %res = phi i32 [%rfl, %then], [%arg, %bb]
+  ret i32 %res
+}
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
index 6ac65f5c70337..ffd0327209ae0 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
@@ -10,8 +10,8 @@ define float @hoist_fneg_f32(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fneg_f32(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fneg float [[ARG]]
-; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[TMP0]]
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -24,8 +24,8 @@ define double @hoist_fneg_f64(double %arg, i32 %lane) {
 ; CHECK-LABEL: define double @hoist_fneg_f64(
 ; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fneg double [[ARG]]
-; CHECK-NEXT:    [[RFL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[TMP0]]
 ; CHECK-NEXT:    ret double [[RFL]]
 ;
 bb:
@@ -40,8 +40,8 @@ define i32 @hoist_add_i32(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: define i32 @hoist_add_i32(
 ; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], 16777215
-; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
 ; CHECK-NEXT:    ret i32 [[RFL]]
 ;
 bb:
@@ -54,8 +54,8 @@ define float @hoist_fadd_f32(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fadd_f32(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fadd float [[ARG]], 1.280000e+02
-; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[TMP0]], 1.280000e+02
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -70,8 +70,8 @@ define i64 @hoist_and_i64(i64 %arg, i32 %lane) {
 ; CHECK-LABEL: define i64 @hoist_and_i64(
 ; CHECK-SAME: i64 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = and i64 [[ARG]], 16777215
-; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = and i64 [[TMP0]], 16777215
 ; CHECK-NEXT:    ret i64 [[RFL]]
 ;
 bb:
@@ -84,8 +84,8 @@ define double @hoist_fadd_f64(double %arg, i32 %lane) {
 ; CHECK-LABEL: define double @hoist_fadd_f64(
 ; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fadd double [[ARG]], 1.280000e+02
-; CHECK-NEXT:    [[RFL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd double [[TMP0]], 1.280000e+02
 ; CHECK-NEXT:    ret double [[RFL]]
 ;
 bb:
@@ -100,8 +100,8 @@ define i32 @hoist_sub_i32_lhs(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: define i32 @hoist_sub_i32_lhs(
 ; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = sub i32 16777215, [[ARG]]
-; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = sub i32 16777215, [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[RFL]]
 ;
 bb:
@@ -114,8 +114,8 @@ define float @hoist_fsub_f32_lhs(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fsub_f32_lhs(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fsub float 1.280000e+02, [[ARG]]
-; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fsub float 1.280000e+02, [[TMP0]]
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -141,3 +141,61 @@ bb:
   %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
   ret float %rfl
 }
+
+define i32 @readlane_lane_op_in_other_block(i1 %cond, i32 %arg, i32 %base) {
+; CHECK-LABEL: define i32 @readlane_lane_op_in_other_block(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[ARG:%.*]], i32 [[BASE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[LANE:%.*]] = add i32 [[BASE]], 2
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[LANE]], %[[BB]] ]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+bb:
+  %lane = add i32 %base, 2
+  br i1 %cond, label %then, label %end
+
+then:
+  %val = add i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  br label %end
+
+end:
+  %res = phi i32 [%rfl, %then], [%lane, %bb]
+  ret i32 %res
+}
+
+; test that convergence tokens are preserved
+
+define i32 @hoist_preserves_convergence_token(i1 %cond, i32 %arg, i32 %lane) convergent {
+; CHECK-LABEL: define i32 @hoist_preserves_convergence_token(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[ENTRY:%.*]] = call token @llvm.experimental.convergence.entry()
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]]) [ "convergencectrl"(token [[ENTRY]]) ]
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[ARG]], %[[BB]] ]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+bb:
+  %entry = call token @llvm.experimental.convergence.entry()
+  br i1 %cond, label %then, label %end
+
+then:
+  %val = add i32 %arg, 16777215
+  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane) [ "convergencectrl"(token %entry)]
+  br label %end
+
+end:
+  %res = phi i32 [%rfl, %then], [%arg, %bb]
+  ret i32 %res
+}

>From ed96a71d0f72cb39845b2f51b4f5387f049e9f07 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 27 Feb 2025 13:30:42 +0100
Subject: [PATCH 3/8] Do not touch permlane64

---
 llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 170b561524549..07896cacbb0b8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1277,8 +1277,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
     // If the readfirstlane reads the result of an operation that exists
     // both in the SALU and VALU, we may be able to hoist it higher in order
     // to scalarize the expression.
-    if (Instruction *Res = hoistReadLaneThroughOperand(IC, II))
-      return Res;
+    if (IID != Intrinsic::amdgcn_permlane64) {
+      if (Instruction *Res = hoistReadLaneThroughOperand(IC, II))
+        return Res;
+    }
 
     return std::nullopt;
   }

>From 0a903dcc17c58612dde8ce3265713b3f605e48d8 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 3 Mar 2025 11:38:21 +0100
Subject: [PATCH 4/8] Add permlane64 + cast instructions

---
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |  81 ++++----
 .../Target/AMDGPU/AMDGPUTargetTransformInfo.h |   4 +-
 .../AMDGPU/llvm.amdgcn.permlane64.ll          | 153 +++++++++++++++
 .../AMDGPU/llvm.amdgcn.readfirstlane.ll       | 184 ++++++++++++++++++
 .../AMDGPU/llvm.amdgcn.readlane.ll            |  74 ++++---
 5 files changed, 427 insertions(+), 69 deletions(-)
 create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 07896cacbb0b8..3adec41839672 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -482,8 +482,14 @@ bool GCNTTIImpl::simplifyDemandedLaneMaskArg(InstCombiner &IC,
   return false;
 }
 
-Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
-                                                     IntrinsicInst &II) const {
+Instruction *
+GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
+                                             IntrinsicInst &II) const {
+  const auto IID = II.getIntrinsicID();
+  assert(IID == Intrinsic::amdgcn_readlane ||
+         IID == Intrinsic::amdgcn_readfirstlane ||
+         IID == Intrinsic::amdgcn_permlane64);
+
   Instruction *Op = dyn_cast<Instruction>(II.getOperand(0));
 
   // Only do this if both instructions are in the same block
@@ -492,7 +498,8 @@ Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
   if (!Op || !Op->hasOneUser() || Op->getParent() != II.getParent())
     return nullptr;
 
-  const bool IsReadLane = (II.getIntrinsicID() == Intrinsic::amdgcn_readlane);
+  const bool IsReadLane = (IID == Intrinsic::amdgcn_readlane);
+  const bool IsPermLane = (IID == Intrinsic::amdgcn_permlane64);
 
   // If this is a readlane, check that the second operand is a constant, or is
   // defined before Op so we know it's safe to move this intrinsic higher.
@@ -505,7 +512,8 @@ Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
       return nullptr;
   }
 
-  const auto DoIt = [&](unsigned OpIdx) -> Instruction * {
+  const auto DoIt = [&](unsigned OpIdx,
+                        Function *NewIntrinsic) -> Instruction * {
     SmallVector<Value *, 2> Ops{Op->getOperand(OpIdx)};
     if (IsReadLane)
       Ops.push_back(LaneID);
@@ -515,27 +523,40 @@ Instruction *GCNTTIImpl::hoistReadLaneThroughOperand(InstCombiner &IC,
     SmallVector<OperandBundleDef, 2> OpBundles;
     II.getOperandBundlesAsDefs(OpBundles);
 
-    CallInst *NewII =
-        IC.Builder.CreateCall(II.getCalledFunction(), Ops, OpBundles);
+    CallInst *NewII = IC.Builder.CreateCall(NewIntrinsic, Ops, OpBundles);
+    NewII->takeName(&II);
 
     Instruction &NewOp = *Op->clone();
     NewOp.setOperand(OpIdx, NewII);
     return &NewOp;
   };
 
-  // TODO: Are any operations more expensive on the SALU than VALU, and thus
-  //       need to be excluded here?
-
   if (isa<UnaryOperator>(Op))
-    return DoIt(0);
+    return DoIt(0, II.getCalledFunction());
+
+  if (isa<CastInst>(Op)) {
+    Value *Src = Op->getOperand(0);
+    Type *SrcTy = Src->getType();
+    if (!isTypeLegal(SrcTy))
+      return nullptr;
+
+    Function *Remangled =
+        Intrinsic::getOrInsertDeclaration(II.getModule(), IID, {SrcTy});
+    return DoIt(0, Remangled);
+  }
 
-  if (isa<BinaryOperator>(Op)) {
+  // Don't hoist through a binary operator for permlane64. It doesn't
+  // achieve anything and we'd need to repeat the call on every operand.
+  //
+  // We can do it for read(first)lane if the other operand is already scalar
+  // because then we don't need to repeat the call.
+  if (!IsPermLane && isa<BinaryOperator>(Op)) {
     // FIXME: If we had access to UniformityInfo here we could just check
     // if the operand is uniform.
     if (isTriviallyUniform(Op->getOperandUse(0)))
-      return DoIt(1);
+      return DoIt(1, II.getCalledFunction());
     if (isTriviallyUniform(Op->getOperandUse(1)))
-      return DoIt(0);
+      return DoIt(0, II.getCalledFunction());
   }
 
   return nullptr;
@@ -1233,31 +1254,6 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
         simplifyDemandedLaneMaskArg(IC, II, 1))
       return &II;
 
-    // readfirstlane.ty0 (bitcast ty1 x to ty0) -> bitcast (readfirstlane.ty1)
-    if (auto *BC = dyn_cast<BitCastInst>(Src);
-        BC && BC->hasOneUse() && IID != Intrinsic::amdgcn_ds_bpermute) {
-      Value *BCSrc = BC->getOperand(0);
-
-      // TODO: Handle this for update_dpp, mov_ddp8, and all permlane variants.
-      if (isTypeLegal(BCSrc->getType())) {
-        Module *M = IC.Builder.GetInsertBlock()->getModule();
-        Function *Remangled =
-            Intrinsic::getOrInsertDeclaration(M, IID, {BCSrc->getType()});
-
-        // Make sure convergence tokens are preserved.
-        // TODO: CreateIntrinsic should allow directly copying bundles
-        SmallVector<OperandBundleDef, 2> OpBundles;
-        II.getOperandBundlesAsDefs(OpBundles);
-
-        SmallVector<Value *, 3> Args(II.args());
-        Args[0] = BCSrc;
-
-        CallInst *NewCall = IC.Builder.CreateCall(Remangled, Args, OpBundles);
-        NewCall->takeName(&II);
-        return new BitCastInst(NewCall, II.getType());
-      }
-    }
-
     // If the lane argument of bpermute is uniform, change it to readlane. This
     // generates better code and can enable further optimizations because
     // readlane is AlwaysUniform.
@@ -1274,13 +1270,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
       }
     }
 
-    // If the readfirstlane reads the result of an operation that exists
-    // both in the SALU and VALU, we may be able to hoist it higher in order
-    // to scalarize the expression.
-    if (IID != Intrinsic::amdgcn_permlane64) {
-      if (Instruction *Res = hoistReadLaneThroughOperand(IC, II))
-        return Res;
-    }
+    if (Instruction *Res = hoistLaneIntrinsicThroughOperand(IC, II))
+      return Res;
 
     return std::nullopt;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 4d2ba84b40472..e00720dfa1eb7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -236,8 +236,8 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
                                              const APInt &DemandedElts,
                                              APInt &UndefElts) const;
 
-  Instruction *hoistReadLaneThroughOperand(InstCombiner &IC,
-                                           IntrinsicInst &II) const;
+  Instruction *hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
+                                                IntrinsicInst &II) const;
 
   std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
       InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll
new file mode 100644
index 0000000000000..3509c81377b13
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 -passes=instcombine -S < %s | FileCheck %s
+
+; The readfirstlane version of this test covers all the interesting cases of the
+; shared logic. This testcase focuses on permlane64-specific pitfalls.
+
+; test unary
+
+define float @hoist_fneg_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fneg_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[TMP0]]
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fneg float %arg
+  %pl = call float @llvm.amdgcn.permlane64.f32(float %val)
+  ret float %pl
+}
+
+define double @hoist_fneg_f64(double %arg) {
+; CHECK-LABEL: define double @hoist_fneg_f64(
+; CHECK-SAME: double [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.permlane64.f64(double [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[TMP0]]
+; CHECK-NEXT:    ret double [[RFL]]
+;
+bb:
+  %val = fneg double %arg
+  %pl = call double @llvm.amdgcn.permlane64.f64(double %val)
+  ret double %pl
+}
+
+; test casts
+
+define i32 @hoist_trunc(i64 %arg) {
+; CHECK-LABEL: define i32 @hoist_trunc(
+; CHECK-SAME: i64 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.permlane64.i64(i64 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RFL]] to i32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+bb:
+  %val = trunc i64 %arg to i32
+  %pl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
+  ret i32 %pl
+}
+
+define i64 @hoist_zext(i32 %arg) {
+; CHECK-LABEL: define i64 @hoist_zext(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RFL]] to i64
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+bb:
+  %val = zext i32 %arg to i64
+  %pl = call i64 @llvm.amdgcn.permlane64.i64(i64 %val)
+  ret i64 %pl
+}
+
+; test binary i32
+
+define i32 @hoist_add_i32(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_add_i32(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], 16777215
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = add i32 %arg, 16777215
+  %pl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
+  ret i32 %pl
+}
+
+define float @hoist_fadd_f32(float %arg) {
+; CHECK-LABEL: define float @hoist_fadd_f32(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fadd float [[ARG]], 1.280000e+02
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[VAL]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fadd float %arg, 128.0
+  %pl = call float @llvm.amdgcn.permlane64.f32(float %val)
+  ret float %pl
+}
+
+; test cases where hoisting isn't possible
+
+define float @cross_block_hoisting(i1 %cond, float %arg) {
+; CHECK-LABEL: define float @cross_block_hoisting(
+; CHECK-SAME: i1 [[COND:%.*]], float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[VAL:%.*]] = fneg float [[ARG]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[VAL]])
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[RFL]], %[[THEN]] ], [ [[VAL]], %[[BB]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+bb:
+  %val = fneg float %arg
+  br i1 %cond, label %then, label %end
+
+then:
+  %pl = call float @llvm.amdgcn.permlane64.f32(float %val)
+  br label %end
+
+end:
+  %res = phi float [%pl, %then], [%val, %bb]
+  ret float %res
+}
+
+; test that convergence tokens are preserved
+
+define float @hoist_preserves_convergence_token(i1 %cond, float %arg) convergent {
+; CHECK-LABEL: define float @hoist_preserves_convergence_token(
+; CHECK-SAME: i1 [[COND:%.*]], float [[ARG:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  [[BB:.*]]:
+; CHECK-NEXT:    [[ENTRY:%.*]] = call token @llvm.experimental.convergence.entry()
+; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
+; CHECK:       [[THEN]]:
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[ARG]]) [ "convergencectrl"(token [[ENTRY]]) ]
+; CHECK-NEXT:    [[TMP0:%.*]] = fneg float [[RFL]]
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[TMP0]], %[[THEN]] ], [ [[ARG]], %[[BB]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+bb:
+  %entry = call token @llvm.experimental.convergence.entry()
+  br i1 %cond, label %then, label %end
+
+then:
+  %val = fneg float %arg
+  %pl = call float @llvm.amdgcn.permlane64.f32(float %val) [ "convergencectrl"(token %entry)]
+  br label %end
+
+end:
+  %res = phi float [%pl, %then], [%arg, %bb]
+  ret float %res
+}
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
index bc750bea4beea..60561459e3f11 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -31,6 +31,190 @@ bb:
   ret double %rfl
 }
 
+; test casts
+
+define i32 @hoist_trunc(i64 %arg) {
+; CHECK-LABEL: define i32 @hoist_trunc(
+; CHECK-SAME: i64 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readfirstlane.i64(i64 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RFL]] to i32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+bb:
+  %val = trunc i64 %arg to i32
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i64 @hoist_zext(i32 %arg) {
+; CHECK-LABEL: define i64 @hoist_zext(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RFL]] to i64
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+bb:
+  %val = zext i32 %arg to i64
+  %rfl = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %val)
+  ret i64 %rfl
+}
+
+define i64 @hoist_sext(i32 %arg) {
+; CHECK-LABEL: define i64 @hoist_sext(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[RFL]] to i64
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+bb:
+  %val = sext i32 %arg to i64
+  %rfl = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %val)
+  ret i64 %rfl
+}
+
+define i32 @hoist_fptoui(float %arg) {
+; CHECK-LABEL: define i32 @hoist_fptoui(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = fptoui float [[RFL]] to i32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+bb:
+  %val = fptoui float %arg to i32
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @hoist_fptosi(float %arg) {
+; CHECK-LABEL: define i32 @hoist_fptosi(
+; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = fptosi float [[RFL]] to i32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+bb:
+  %val = fptosi float %arg to i32
+  %rfl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define float @hoist_uitofp(i32 %arg) {
+; CHECK-LABEL: define float @hoist_uitofp(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = uitofp i32 [[RFL]] to float
+; CHECK-NEXT:    ret float [[TMP0]]
+;
+bb:
+  %val = uitofp i32 %arg to float
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define float @hoist_sitofp(i32 %arg) {
+; CHECK-LABEL: define float @hoist_sitofp(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = sitofp i32 [[RFL]] to float
+; CHECK-NEXT:    ret float [[TMP0]]
+;
+bb:
+  %val = sitofp i32 %arg to float
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define float @hoist_fptrunc(double %arg) {
+; CHECK-LABEL: define float @hoist_fptrunc(
+; CHECK-SAME: double [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = fptrunc double [[RFL]] to float
+; CHECK-NEXT:    ret float [[TMP0]]
+;
+bb:
+  %val = fptrunc double %arg to float
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define float @hoist_fpext(half %arg) {
+; CHECK-LABEL: define float @hoist_fpext(
+; CHECK-SAME: half [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call half @llvm.amdgcn.readfirstlane.f16(half [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = fpext half [[RFL]] to float
+; CHECK-NEXT:    ret float [[TMP0]]
+;
+bb:
+  %val = fpext half %arg to float
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define i64 @hoist_ptrtoint(ptr %arg) {
+; CHECK-LABEL: define i64 @hoist_ptrtoint(
+; CHECK-SAME: ptr [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call ptr @llvm.amdgcn.readfirstlane.p0(ptr [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RFL]] to i64
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+bb:
+  %val = ptrtoint ptr %arg to i64
+  %rfl = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %val)
+  ret i64 %rfl
+}
+
+define ptr @hoist_inttoptr(i64 %arg) {
+; CHECK-LABEL: define ptr @hoist_inttoptr(
+; CHECK-SAME: i64 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readfirstlane.i64(i64 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[RFL]] to ptr
+; CHECK-NEXT:    ret ptr [[TMP0]]
+;
+bb:
+  %val = inttoptr i64 %arg to ptr
+  %rfl = call ptr @llvm.amdgcn.readfirstlane.p0(ptr %val)
+  ret ptr %rfl
+}
+
+define float @hoist_bitcast(i32 %arg) {
+; CHECK-LABEL: define float @hoist_bitcast(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32 [[RFL]] to float
+; CHECK-NEXT:    ret float [[TMP0]]
+;
+bb:
+  %val = bitcast i32 %arg to float
+  %rfl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  ret float %rfl
+}
+
+define ptr addrspace(1) @hoist_addrspacecast(ptr addrspace(0) %arg) {
+; CHECK-LABEL: define ptr addrspace(1) @hoist_addrspacecast(
+; CHECK-SAME: ptr [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call ptr @llvm.amdgcn.readfirstlane.p0(ptr [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr [[RFL]] to ptr addrspace(1)
+; CHECK-NEXT:    ret ptr addrspace(1) [[TMP0]]
+;
+bb:
+  %val = addrspacecast ptr addrspace(0) %arg to ptr addrspace(1)
+  %rfl = call ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1) %val)
+  ret ptr addrspace(1) %rfl
+}
+
 ; test binary i32
 
 define i32 @hoist_add_i32(i32 %arg) {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
index ffd0327209ae0..09d5c71207cdc 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
@@ -16,8 +16,8 @@ define float @hoist_fneg_f32(float %arg, i32 %lane) {
 ;
 bb:
   %val = fneg float %arg
-  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
-  ret float %rfl
+  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rl
 }
 
 define double @hoist_fneg_f64(double %arg, i32 %lane) {
@@ -30,8 +30,38 @@ define double @hoist_fneg_f64(double %arg, i32 %lane) {
 ;
 bb:
   %val = fneg double %arg
-  %rfl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
-  ret double %rfl
+  %rl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
+  ret double %rl
+}
+
+; test casts
+
+define i32 @hoist_trunc(i64 %arg, i32 %lane) {
+; CHECK-LABEL: define i32 @hoist_trunc(
+; CHECK-SAME: i64 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RFL]] to i32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+bb:
+  %val = trunc i64 %arg to i32
+  %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  ret i32 %rl
+}
+
+define i64 @hoist_zext(i32 %arg, i32 %lane) {
+; CHECK-LABEL: define i64 @hoist_zext(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RFL]] to i64
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+bb:
+  %val = zext i32 %arg to i64
+  %rl = call i64 @llvm.amdgcn.readlane.i64(i64 %val, i32 %lane)
+  ret i64 %rl
 }
 
 ; test binary i32
@@ -46,8 +76,8 @@ define i32 @hoist_add_i32(i32 %arg, i32 %lane) {
 ;
 bb:
   %val = add i32 %arg, 16777215
-  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
-  ret i32 %rfl
+  %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  ret i32 %rl
 }
 
 define float @hoist_fadd_f32(float %arg, i32 %lane) {
@@ -60,8 +90,8 @@ define float @hoist_fadd_f32(float %arg, i32 %lane) {
 ;
 bb:
   %val = fadd float %arg, 128.0
-  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
-  ret float %rfl
+  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rl
 }
 
 ; test binary i64
@@ -76,8 +106,8 @@ define i64 @hoist_and_i64(i64 %arg, i32 %lane) {
 ;
 bb:
   %val = and i64 %arg, 16777215
-  %rfl = call i64 @llvm.amdgcn.readlane.i32(i64 %val, i32 %lane)
-  ret i64 %rfl
+  %rl = call i64 @llvm.amdgcn.readlane.i32(i64 %val, i32 %lane)
+  ret i64 %rl
 }
 
 define double @hoist_fadd_f64(double %arg, i32 %lane) {
@@ -90,8 +120,8 @@ define double @hoist_fadd_f64(double %arg, i32 %lane) {
 ;
 bb:
   %val = fadd double %arg, 128.0
-  %rfl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
-  ret double %rfl
+  %rl = call double @llvm.amdgcn.readlane.f64(double %val, i32 %lane)
+  ret double %rl
 }
 
 ; test constant on LHS
@@ -106,8 +136,8 @@ define i32 @hoist_sub_i32_lhs(i32 %arg, i32 %lane) {
 ;
 bb:
   %val = sub i32 16777215, %arg
-  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
-  ret i32 %rfl
+  %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  ret i32 %rl
 }
 
 define float @hoist_fsub_f32_lhs(float %arg, i32 %lane) {
@@ -120,8 +150,8 @@ define float @hoist_fsub_f32_lhs(float %arg, i32 %lane) {
 ;
 bb:
   %val = fsub float 128.0, %arg
-  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
-  ret float %rfl
+  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rl
 }
 
 ; Check cases where we can't move the readlane higher
@@ -138,8 +168,8 @@ define float @cannot_move_readlane(float %arg, i32 %base) {
 bb:
   %val = fsub float 128.0, %arg
   %lane = add i32 %base, 2
-  %rfl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
-  ret float %rfl
+  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rl
 }
 
 define i32 @readlane_lane_op_in_other_block(i1 %cond, i32 %arg, i32 %base) {
@@ -162,11 +192,11 @@ bb:
 
 then:
   %val = add i32 %arg, 16777215
-  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
+  %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane)
   br label %end
 
 end:
-  %res = phi i32 [%rfl, %then], [%lane, %bb]
+  %res = phi i32 [%rl, %then], [%lane, %bb]
   ret i32 %res
 }
 
@@ -192,10 +222,10 @@ bb:
 
 then:
   %val = add i32 %arg, 16777215
-  %rfl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane) [ "convergencectrl"(token %entry)]
+  %rl = call i32 @llvm.amdgcn.readlane.i32(i32 %val, i32 %lane) [ "convergencectrl"(token %entry)]
   br label %end
 
 end:
-  %res = phi i32 [%rfl, %then], [%arg, %bb]
+  %res = phi i32 [%rl, %then], [%arg, %bb]
   ret i32 %res
 }

>From cf5731cb2aae1a40c74bac33219d749a294595e9 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 3 Mar 2025 12:03:39 +0100
Subject: [PATCH 5/8] Also hoist permlane64 through binary ops
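
The binary-operator hoisting now also applies to permlane64 when the other
operand is trivially uniform. As an illustrative sketch only (this mirrors
the updated hoist_add_i32 test below and is not an additional change in the
patch; result names are illustrative):

  %val = add i32 %arg, 16777215
  %pl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
  ; is rewritten to
  %pl = call i32 @llvm.amdgcn.permlane64.i32(i32 %arg)
  %0 = add i32 %pl, 16777215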

---
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |  9 +--
 .../AMDGPU/llvm.amdgcn.permlane64.ll          | 78 +++++++++++++++----
 2 files changed, 67 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 3adec41839672..2436022521e5f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -499,7 +499,6 @@ GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
     return nullptr;
 
   const bool IsReadLane = (IID == Intrinsic::amdgcn_readlane);
-  const bool IsPermLane = (IID == Intrinsic::amdgcn_permlane64);
 
   // If this is a readlane, check that the second operand is a constant, or is
   // defined before Op so we know it's safe to move this intrinsic higher.
@@ -545,12 +544,8 @@ GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
     return DoIt(0, Remangled);
   }
 
-  // Don't hoist through a binary operator for permlane64. It doesn't
-  // achieve anything and we'd need to repeat the call on every operand.
-  //
-  // We can do it for read(first)lane if other operands are already scalar
-  // because then we don't need to repeat the call.
-  if (!IsPermLane && isa<BinaryOperator>(Op)) {
+  // We can also hoist through binary operators if the other operand is uniform.
+  if (isa<BinaryOperator>(Op)) {
     // FIXME: If we had access to UniformityInfo here we could just check
     // if the operand is uniform.
     if (isTriviallyUniform(Op->getOperandUse(0)))
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll
index 3509c81377b13..4ab5a5cb0844b 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.permlane64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 -passes=instcombine -S < %s | FileCheck %s
 
-; The readfirstlane version of this test covers all the interesting cases of the
+; The permlane64 version of this test covers all the interesting cases of the
 ; shared logic. This testcase focuses on permlane64 specific pitfalls.
 
 ; test unary
@@ -10,13 +10,13 @@ define float @hoist_fneg_f32(float %arg) {
 ; CHECK-LABEL: define float @hoist_fneg_f32(
 ; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[ARG]])
 ; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[TMP0]]
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
   %val = fneg float %arg
-  %pl = call float @llvm.amdgcn.readfirstlane.f32(float %val)
+  %pl = call float @llvm.amdgcn.permlane64.f32(float %val)
   ret float %pl
 }
 
@@ -24,13 +24,13 @@ define double @hoist_fneg_f64(double %arg) {
 ; CHECK-LABEL: define double @hoist_fneg_f64(
 ; CHECK-SAME: double [[ARG:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[ARG]])
+; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.permlane64.f64(double [[ARG]])
 ; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[TMP0]]
 ; CHECK-NEXT:    ret double [[RFL]]
 ;
 bb:
   %val = fneg double %arg
-  %pl = call double @llvm.amdgcn.readfirstlane.f64(double %val)
+  %pl = call double @llvm.amdgcn.permlane64.f64(double %val)
   ret double %pl
 }
 
@@ -40,13 +40,13 @@ define i32 @hoist_trunc(i64 %arg) {
 ; CHECK-LABEL: define i32 @hoist_trunc(
 ; CHECK-SAME: i64 [[ARG:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readfirstlane.i64(i64 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.permlane64.i64(i64 [[ARG]])
 ; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RFL]] to i32
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 bb:
   %val = trunc i64 %arg to i32
-  %pl = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %val)
+  %pl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
   ret i32 %pl
 }
 
@@ -54,13 +54,13 @@ define i64 @hoist_zext(i32 %arg) {
 ; CHECK-LABEL: define i64 @hoist_zext(
 ; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[ARG]])
 ; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RFL]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP0]]
 ;
 bb:
   %val = zext i32 %arg to i64
-  %pl = call i64 @llvm.amdgcn.readfirstlane.i64(i64 %val)
+  %pl = call i64 @llvm.amdgcn.permlane64.i64(i64 %val)
   ret i64 %pl
 }
 
@@ -70,8 +70,8 @@ define i32 @hoist_add_i32(i32 %arg) {
 ; CHECK-LABEL: define i32 @hoist_add_i32(
 ; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], 16777215
-; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[VAL]])
+; CHECK-NEXT:    [[PL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[PL]], 16777215
 ; CHECK-NEXT:    ret i32 [[RFL]]
 ;
 bb:
@@ -84,8 +84,8 @@ define float @hoist_fadd_f32(float %arg) {
 ; CHECK-LABEL: define float @hoist_fadd_f32(
 ; CHECK-SAME: float [[ARG:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fadd float [[ARG]], 1.280000e+02
-; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[VAL]])
+; CHECK-NEXT:    [[PL:%.*]] = call float @llvm.amdgcn.permlane64.f32(float [[ARG]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[PL]], 1.280000e+02
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -94,8 +94,60 @@ bb:
   ret float %pl
 }
 
+; test multiple iterations
+
+define i32 @hoist_multiple_times(i32 %arg) {
+; CHECK-LABEL: define i32 @hoist_multiple_times(
+; CHECK-SAME: i32 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[ARG]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 16777215, [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP2]], 4242
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP3]], 6
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val.0 = shl i32 %arg, 2
+  %val.1 = sub i32 16777215, %val.0
+  %val.2 = xor i32 %val.1, 4242
+  %val.3 = add i32 %val.2, 6
+  %rfl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val.3)
+  ret i32 %rfl
+}
+
 ; test cases where hoisting isn't possible
 
+define i32 @operand_is_instr(i32 %arg, ptr %src) {
+; CHECK-LABEL: define i32 @operand_is_instr(
+; CHECK-SAME: i32 [[ARG:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[OTHER:%.*]] = load i32, ptr [[SRC]], align 4
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], [[OTHER]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %other = load i32, ptr %src
+  %val = add i32 %arg, %other
+  %rfl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
+  ret i32 %rfl
+}
+
+define i32 @operand_is_arg(i32 %arg, i32 %other) {
+; CHECK-LABEL: define i32 @operand_is_arg(
+; CHECK-SAME: i32 [[ARG:%.*]], i32 [[OTHER:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = add i32 [[ARG]], [[OTHER]]
+; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RFL]]
+;
+bb:
+  %val = add i32 %arg, %other
+  %rfl = call i32 @llvm.amdgcn.permlane64.i32(i32 %val)
+  ret i32 %rfl
+}
+
 define float @cross_block_hoisting(i1 %cond, float %arg) {
 ; CHECK-LABEL: define float @cross_block_hoisting(
 ; CHECK-SAME: i1 [[COND:%.*]], float [[ARG:%.*]]) #[[ATTR0]] {

>From c59741a2bd2fcb6efff19c59f2bad5b37b604a51 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Wed, 30 Apr 2025 11:31:20 +0200
Subject: [PATCH 6/8] fix rebase issues

---
 llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 2436022521e5f..2d6fa5b39af53 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1265,8 +1265,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
       }
     }
 
-    if (Instruction *Res = hoistLaneIntrinsicThroughOperand(IC, II))
-      return Res;
+    if (IID != Intrinsic::amdgcn_ds_bpermute) {
+      if (Instruction *Res = hoistLaneIntrinsicThroughOperand(IC, II))
+        return Res;
+    }
 
     return std::nullopt;
   }

>From f7e30b7524aad8648f49abb2fe1040bd7a9ff014 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Wed, 30 Apr 2025 13:47:14 +0200
Subject: [PATCH 7/8] Use isSafeToMoveBefore

---
 llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 2d6fa5b39af53..394e2fe573333 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -21,6 +21,7 @@
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/Transforms/InstCombine/InstCombiner.h"
+#include "llvm/Transforms/Utils/CodeMoverUtils.h"
 #include <optional>
 
 using namespace llvm;
@@ -507,8 +508,10 @@ GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
     LaneID = II.getOperand(1);
     // Check LaneID is available at Op, otherwise we can't move the readlane
     // higher.
-    if (!IC.getDominatorTree().dominates(LaneID, Op))
-      return nullptr;
+    if (auto *LaneIDInst = dyn_cast<Instruction>(LaneID)) {
+      if (!isSafeToMoveBefore(*LaneIDInst, *Op, IC.getDominatorTree()))
+        return nullptr;
+    }
   }
 
   const auto DoIt = [&](unsigned OpIdx,

>From e9cbeee4ce49c7da1015835c3725680fb7cb23b5 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 1 May 2025 11:59:59 +0200
Subject: [PATCH 8/8] Comments

---
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     | 54 ++++++-----
 .../AMDGPU/llvm.amdgcn.readlane.ll            | 89 ++++++++++---------
 2 files changed, 77 insertions(+), 66 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 394e2fe573333..1bdc78e098d5f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -21,7 +21,6 @@
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/Transforms/InstCombine/InstCombiner.h"
-#include "llvm/Transforms/Utils/CodeMoverUtils.h"
 #include <optional>
 
 using namespace llvm;
@@ -483,6 +482,16 @@ bool GCNTTIImpl::simplifyDemandedLaneMaskArg(InstCombiner &IC,
   return false;
 }
 
+static CallInst *rewriteCall(IRBuilderBase &B, CallInst &Old,
+                             Function &NewCallee, ArrayRef<Value *> Ops) {
+  SmallVector<OperandBundleDef, 2> OpBundles;
+  Old.getOperandBundlesAsDefs(OpBundles);
+
+  CallInst *NewCall = B.CreateCall(&NewCallee, Ops, OpBundles);
+  NewCall->takeName(&Old);
+  return NewCall;
+}
+
 Instruction *
 GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
                                              IntrinsicInst &II) const {
@@ -491,53 +500,54 @@ GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
          IID == Intrinsic::amdgcn_readfirstlane ||
          IID == Intrinsic::amdgcn_permlane64);
 
-  Instruction *Op = dyn_cast<Instruction>(II.getOperand(0));
+  Instruction *OpInst = dyn_cast<Instruction>(II.getOperand(0));
 
   // Only do this if both instructions are in the same block
   // (so the exec mask won't change) and the readlane is the only user of its
   // operand.
-  if (!Op || !Op->hasOneUser() || Op->getParent() != II.getParent())
+  if (!OpInst || !OpInst->hasOneUser() || OpInst->getParent() != II.getParent())
     return nullptr;
 
   const bool IsReadLane = (IID == Intrinsic::amdgcn_readlane);
 
   // If this is a readlane, check that the second operand is a constant, or is
-  // defined before Op so we know it's safe to move this intrinsic higher.
+  // defined before OpInst so we know it's safe to move this intrinsic higher.
   Value *LaneID = nullptr;
   if (IsReadLane) {
     LaneID = II.getOperand(1);
-    // Check LaneID is available at Op, otherwise we can't move the readlane
-    // higher.
+
+    // readlane takes an extra operand for the lane ID, so we must check if that
+    // LaneID value can be used at the point where we want to move the
+    // intrinsic.
     if (auto *LaneIDInst = dyn_cast<Instruction>(LaneID)) {
-      if (!isSafeToMoveBefore(*LaneIDInst, *Op, IC.getDominatorTree()))
+      if (!IC.getDominatorTree().dominates(LaneIDInst, OpInst))
         return nullptr;
     }
   }
 
+  // Hoist the intrinsic (II) through OpInst.
+  //
+  // (II (OpInst x)) -> (OpInst (II x))
   const auto DoIt = [&](unsigned OpIdx,
                         Function *NewIntrinsic) -> Instruction * {
-    SmallVector<Value *, 2> Ops{Op->getOperand(OpIdx)};
+    SmallVector<Value *, 2> Ops{OpInst->getOperand(OpIdx)};
     if (IsReadLane)
       Ops.push_back(LaneID);
 
-    // Make sure convergence tokens are preserved.
-    // TODO: CreateIntrinsic should allow directly copying bundles
-    SmallVector<OperandBundleDef, 2> OpBundles;
-    II.getOperandBundlesAsDefs(OpBundles);
-
-    CallInst *NewII = IC.Builder.CreateCall(NewIntrinsic, Ops, OpBundles);
-    NewII->takeName(&II);
+    // Rewrite the intrinsic call.
+    CallInst *NewII = rewriteCall(IC.Builder, II, *NewIntrinsic, Ops);
 
-    Instruction &NewOp = *Op->clone();
+    // Rewrite OpInst so it takes the result of the intrinsic now.
+    Instruction &NewOp = *OpInst->clone();
     NewOp.setOperand(OpIdx, NewII);
     return &NewOp;
   };
 
-  if (isa<UnaryOperator>(Op))
+  if (isa<UnaryOperator>(OpInst))
     return DoIt(0, II.getCalledFunction());
 
-  if (isa<CastInst>(Op)) {
-    Value *Src = Op->getOperand(0);
+  if (isa<CastInst>(OpInst)) {
+    Value *Src = OpInst->getOperand(0);
     Type *SrcTy = Src->getType();
     if (!isTypeLegal(SrcTy))
       return nullptr;
@@ -548,12 +558,12 @@ GCNTTIImpl::hoistLaneIntrinsicThroughOperand(InstCombiner &IC,
   }
 
   // We can also hoist through binary operators if the other operand is uniform.
-  if (isa<BinaryOperator>(Op)) {
+  if (isa<BinaryOperator>(OpInst)) {
     // FIXME: If we had access to UniformityInfo here we could just check
     // if the operand is uniform.
-    if (isTriviallyUniform(Op->getOperandUse(0)))
+    if (isTriviallyUniform(OpInst->getOperandUse(0)))
       return DoIt(1, II.getCalledFunction());
-    if (isTriviallyUniform(Op->getOperandUse(1)))
+    if (isTriviallyUniform(OpInst->getOperandUse(1)))
       return DoIt(0, II.getCalledFunction());
   }
 
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
index 09d5c71207cdc..a9ac4bc93fd3c 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/llvm.amdgcn.readlane.ll
@@ -10,8 +10,8 @@ define float @hoist_fneg_f32(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fneg_f32(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[TMP0]]
+; CHECK-NEXT:    [[RL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg float [[RL]]
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -24,8 +24,8 @@ define double @hoist_fneg_f64(double %arg, i32 %lane) {
 ; CHECK-LABEL: define double @hoist_fneg_f64(
 ; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[TMP0]]
+; CHECK-NEXT:    [[RL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fneg double [[RL]]
 ; CHECK-NEXT:    ret double [[RFL]]
 ;
 bb:
@@ -40,8 +40,8 @@ define i32 @hoist_trunc(i64 %arg, i32 %lane) {
 ; CHECK-LABEL: define i32 @hoist_trunc(
 ; CHECK-SAME: i64 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[RFL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RFL]] to i32
+; CHECK-NEXT:    [[RL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[RL]] to i32
 ; CHECK-NEXT:    ret i32 [[TMP0]]
 ;
 bb:
@@ -54,8 +54,8 @@ define i64 @hoist_zext(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: define i64 @hoist_zext(
 ; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[RFL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RFL]] to i64
+; CHECK-NEXT:    [[RL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[RL]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP0]]
 ;
 bb:
@@ -70,8 +70,8 @@ define i32 @hoist_add_i32(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: define i32 @hoist_add_i32(
 ; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    [[RL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[RL]], 16777215
 ; CHECK-NEXT:    ret i32 [[RFL]]
 ;
 bb:
@@ -84,8 +84,8 @@ define float @hoist_fadd_f32(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fadd_f32(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    [[RL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd float [[RL]], 1.280000e+02
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -100,8 +100,8 @@ define i64 @hoist_and_i64(i64 %arg, i32 %lane) {
 ; CHECK-LABEL: define i64 @hoist_and_i64(
 ; CHECK-SAME: i64 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = and i64 [[TMP0]], 16777215
+; CHECK-NEXT:    [[RL:%.*]] = call i64 @llvm.amdgcn.readlane.i64(i64 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = and i64 [[RL]], 16777215
 ; CHECK-NEXT:    ret i64 [[RFL]]
 ;
 bb:
@@ -114,8 +114,8 @@ define double @hoist_fadd_f64(double %arg, i32 %lane) {
 ; CHECK-LABEL: define double @hoist_fadd_f64(
 ; CHECK-SAME: double [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = fadd double [[TMP0]], 1.280000e+02
+; CHECK-NEXT:    [[RL:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fadd double [[RL]], 1.280000e+02
 ; CHECK-NEXT:    ret double [[RFL]]
 ;
 bb:
@@ -130,8 +130,8 @@ define i32 @hoist_sub_i32_lhs(i32 %arg, i32 %lane) {
 ; CHECK-LABEL: define i32 @hoist_sub_i32_lhs(
 ; CHECK-SAME: i32 [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = sub i32 16777215, [[TMP0]]
+; CHECK-NEXT:    [[RL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = sub i32 16777215, [[RL]]
 ; CHECK-NEXT:    ret i32 [[RFL]]
 ;
 bb:
@@ -144,8 +144,8 @@ define float @hoist_fsub_f32_lhs(float %arg, i32 %lane) {
 ; CHECK-LABEL: define float @hoist_fsub_f32_lhs(
 ; CHECK-SAME: float [[ARG:%.*]], i32 [[LANE:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = fsub float 1.280000e+02, [[TMP0]]
+; CHECK-NEXT:    [[RL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[RFL:%.*]] = fsub float 1.280000e+02, [[RL]]
 ; CHECK-NEXT:    ret float [[RFL]]
 ;
 bb:
@@ -154,24 +154,6 @@ bb:
   ret float %rl
 }
 
-; Check cases where we can't move the readlane higher
-
-define float @cannot_move_readlane(float %arg, i32 %base) {
-; CHECK-LABEL: define float @cannot_move_readlane(
-; CHECK-SAME: float [[ARG:%.*]], i32 [[BASE:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:  [[BB:.*:]]
-; CHECK-NEXT:    [[VAL:%.*]] = fsub float 1.280000e+02, [[ARG]]
-; CHECK-NEXT:    [[LANE:%.*]] = add i32 [[BASE]], 2
-; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
-; CHECK-NEXT:    ret float [[RFL]]
-;
-bb:
-  %val = fsub float 128.0, %arg
-  %lane = add i32 %base, 2
-  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
-  ret float %rl
-}
-
 define i32 @readlane_lane_op_in_other_block(i1 %cond, i32 %arg, i32 %base) {
 ; CHECK-LABEL: define i32 @readlane_lane_op_in_other_block(
 ; CHECK-SAME: i1 [[COND:%.*]], i32 [[ARG:%.*]], i32 [[BASE:%.*]]) #[[ATTR0]] {
@@ -179,11 +161,11 @@ define i32 @readlane_lane_op_in_other_block(i1 %cond, i32 %arg, i32 %base) {
 ; CHECK-NEXT:    [[LANE:%.*]] = add i32 [[BASE]], 2
 ; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
 ; CHECK:       [[THEN]]:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
-; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    [[RL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]])
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[RL]], 16777215
 ; CHECK-NEXT:    br label %[[END]]
 ; CHECK:       [[END]]:
-; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[LANE]], %[[BB]] ]
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[TMP0]], %[[THEN]] ], [ [[LANE]], %[[BB]] ]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 bb:
@@ -200,6 +182,25 @@ end:
   ret i32 %res
 }
 
+; Check cases where we can't move the readlane higher
+
+define float @cannot_move_readlane(float %arg, i32 %base) {
+; CHECK-LABEL: define float @cannot_move_readlane(
+; CHECK-SAME: float [[ARG:%.*]], i32 [[BASE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[VAL:%.*]] = fsub float 1.280000e+02, [[ARG]]
+; CHECK-NEXT:    [[LANE:%.*]] = add i32 [[BASE]], 2
+; CHECK-NEXT:    [[RFL:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL]], i32 [[LANE]])
+; CHECK-NEXT:    ret float [[RFL]]
+;
+bb:
+  %val = fsub float 128.0, %arg
+  %lane = add i32 %base, 2
+  %rl = call float @llvm.amdgcn.readlane.f32(float %val, i32 %lane)
+  ret float %rl
+}
+
+
 ; test that convergence tokens are preserved
 
 define i32 @hoist_preserves_convergence_token(i1 %cond, i32 %arg, i32 %lane) convergent {
@@ -209,11 +210,11 @@ define i32 @hoist_preserves_convergence_token(i1 %cond, i32 %arg, i32 %lane) con
 ; CHECK-NEXT:    [[ENTRY:%.*]] = call token @llvm.experimental.convergence.entry()
 ; CHECK-NEXT:    br i1 [[COND]], label %[[THEN:.*]], label %[[END:.*]]
 ; CHECK:       [[THEN]]:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]]) [ "convergencectrl"(token [[ENTRY]]) ]
-; CHECK-NEXT:    [[RFL:%.*]] = add i32 [[TMP0]], 16777215
+; CHECK-NEXT:    [[RL:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG]], i32 [[LANE]]) [ "convergencectrl"(token [[ENTRY]]) ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[RL]], 16777215
 ; CHECK-NEXT:    br label %[[END]]
 ; CHECK:       [[END]]:
-; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[RFL]], %[[THEN]] ], [ [[ARG]], %[[BB]] ]
+; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[TMP0]], %[[THEN]] ], [ [[ARG]], %[[BB]] ]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
 bb:
