[llvm] 8ba1c38 - [AArch64][GlobalISel] Add heuristics for G_FCONSTANT localization.
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 31 22:23:45 PDT 2023
Author: Amara Emerson
Date: 2023-08-31T22:23:36-07:00
New Revision: 8ba1c38a0d70a8e972f1f011629d6391f8744072
URL: https://github.com/llvm/llvm-project/commit/8ba1c38a0d70a8e972f1f011629d6391f8744072
DIFF: https://github.com/llvm/llvm-project/commit/8ba1c38a0d70a8e972f1f011629d6391f8744072.diff
LOG: [AArch64][GlobalISel] Add heuristics for G_FCONSTANT localization.
Now that an earlier commit adopted SDAG's heuristics for expanding 32/64-bit fp
immediates into either GPR materializations or a constant-pool load, we can
improve the localizer to apply the same heuristics. This avoids localizing
expensive immediates, since duplicating them increases code size.
The combination of these two changes results in minor improvements in CTMark -Os,
and larger improvements in some other cases.
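To make the cost arithmetic concrete, here is a small standalone sketch. It is
an illustration, not LLVM code: the chunk counting only approximates the
AArch64 MOVZ/MOVK expansion that TTI's getIntImmCost models, and the +1 mirrors
the extra GPR-to-FPR move the patch charges as AdditionalCost.

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Rough stand-in for TTI->getIntImmCost(..., TCK_CodeSize) on AArch64:
  // roughly one MOVZ/MOVK per non-zero 16-bit chunk, minimum one instruction.
  static unsigned intImmCost(uint64_t Bits) {
    unsigned Chunks = 0;
    for (int Shift = 0; Shift < 64; Shift += 16)
      Chunks += ((Bits >> Shift) & 0xFFFF) != 0;
    return Chunks ? Chunks : 1;
  }

  static unsigned fpRematCost(double D) {
    uint64_t Bits;
    std::memcpy(&Bits, &D, sizeof(Bits)); // like APFloat::bitcastToAPInt()
    return intImmCost(Bits) + 1;          // +1: the GPR->FPR move
  }

  int main() {
    // 0.01 = 0x3F847AE147AE147B: four non-zero chunks, estimate 4 + 1 = 5,
    // so the localizer leaves one copy in the entry block
    // (see the new f64_imm_cost_too_high test below).
    std::printf("cost(1.0e-02) = %u\n", fpRematCost(1.0e-02));
    // +0.0 never reaches the cost path in the real code: isFPImmLegal()
    // accepts it, so it is localized unconditionally (see f64_imm_cheap).
    std::printf("cost(0.0)     = %u\n", fpRematCost(0.0));
  }

Given the maxUses thresholds already in shouldLocalize (whose tail is visible
in the first hunk below), an estimated cost above 2 is only worth
rematerializing for a single use. That is why 1.0e-02 stays in the entry block
in f64_imm_cost_too_high while 0.0 is sunk next to each store in f64_imm_cheap.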
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b1042241859180..b7bfc7563a23bb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -50,6 +50,7 @@
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
@@ -24764,7 +24765,8 @@ bool AArch64TargetLowering::shouldLocalize(
llvm_unreachable("Unexpected remat cost");
};
- switch (MI.getOpcode()) {
+ unsigned Opc = MI.getOpcode();
+ switch (Opc) {
case TargetOpcode::G_GLOBAL_VALUE: {
// On Darwin, TLS global vars get selected into function calls, which
// we don't want localized, as they can get moved into the middle of a
@@ -24774,14 +24776,37 @@ bool AArch64TargetLowering::shouldLocalize(
return false;
return true; // Always localize G_GLOBAL_VALUE to avoid high reg pressure.
}
+ case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_CONSTANT: {
- auto *CI = MI.getOperand(1).getCImm();
+ const ConstantInt *CI;
+ unsigned AdditionalCost = 0;
+
+ if (Opc == TargetOpcode::G_CONSTANT)
+ CI = MI.getOperand(1).getCImm();
+ else {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ // We try to estimate cost of 32/64b fpimms, as they'll likely be
+ // materialized as integers.
+ if (Ty.getScalarSizeInBits() != 32 && Ty.getScalarSizeInBits() != 64)
+ break;
+ auto APF = MI.getOperand(1).getFPImm()->getValueAPF();
+ bool OptForSize =
+ MF.getFunction().hasOptSize() || MF.getFunction().hasMinSize();
+ if (isFPImmLegal(APF, EVT::getFloatingPointVT(Ty.getScalarSizeInBits()),
+ OptForSize))
+ return true; // Constant should be cheap.
+ CI =
+ ConstantInt::get(MF.getFunction().getContext(), APF.bitcastToAPInt());
+ // FP materialization also costs an extra move, from gpr to fpr.
+ AdditionalCost = 1;
+ }
APInt Imm = CI->getValue();
InstructionCost Cost = TTI->getIntImmCost(
Imm, CI->getType(), TargetTransformInfo::TCK_CodeSize);
assert(Cost.isValid() && "Expected a valid imm cost");
unsigned RematCost = *Cost.getValue();
+ RematCost += AdditionalCost;
Register Reg = MI.getOperand(0).getReg();
unsigned MaxUses = maxUses(RematCost);
// Don't pass UINT_MAX sentinel value to hasAtMostUserInstrs().
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
index aea306643ccd05..5ab086ffd2c13a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
@@ -124,7 +124,7 @@ define i32 @imm_cost_too_large_cost_of_2() {
; CHECK-NEXT: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s32) from @var1)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2228259
- ; CHECK-NEXT: [[OPAQUE:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C1]]
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s32) = G_CONSTANT_FOLD_BARRIER [[C1]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LOAD]](s32), [[C2]]
; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
@@ -134,19 +134,19 @@ define i32 @imm_cost_too_large_cost_of_2() {
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[GV3:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s32), [[GV3]](p0) :: (store (s32) into @var2)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s32), [[GV3]](p0) :: (store (s32) into @var2)
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.if.then2:
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[GV4:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s32), [[GV4]](p0) :: (store (s32) into @var1)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s32), [[GV4]](p0) :: (store (s32) into @var1)
; CHECK-NEXT: G_BR %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.if.end:
; CHECK-NEXT: [[GV5:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s32), [[GV5]](p0) :: (store (s32) into @var3)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s32), [[GV5]](p0) :: (store (s32) into @var3)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: $w0 = COPY [[C3]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -180,7 +180,7 @@ define i64 @imm_cost_too_large_cost_of_4() {
; CHECK-NEXT: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s64) from @var1_64, align 4)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -2228259
- ; CHECK-NEXT: [[OPAQUE:%[0-9]+]]:_(s64) = G_CONSTANT_FOLD_BARRIER [[C1]]
+ ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(s64) = G_CONSTANT_FOLD_BARRIER [[C1]]
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LOAD]](s64), [[C2]]
; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
@@ -190,19 +190,19 @@ define i64 @imm_cost_too_large_cost_of_4() {
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[GV3:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2_64
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s64), [[GV3]](p0) :: (store (s64) into @var2_64)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s64), [[GV3]](p0) :: (store (s64) into @var2_64)
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.if.then2:
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[GV4:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s64), [[GV4]](p0) :: (store (s64) into @var1_64)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s64), [[GV4]](p0) :: (store (s64) into @var1_64)
; CHECK-NEXT: G_BR %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.if.end:
; CHECK-NEXT: [[GV5:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3_64
- ; CHECK-NEXT: G_STORE [[OPAQUE]](s64), [[GV5]](p0) :: (store (s64) into @var3_64)
+ ; CHECK-NEXT: G_STORE [[CONSTANT_FOLD_BARRIER]](s64), [[GV5]](p0) :: (store (s64) into @var3_64)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: $x0 = COPY [[C3]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
@@ -225,6 +225,119 @@ if.end:
ret i64 0
}
+define i64 @f64_imm_cost_too_high(double %a) {
+ ; CHECK-LABEL: name: f64_imm_cost_too_high
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e-02
+ ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2_64
+ ; CHECK-NEXT: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3_64
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s64) from @var1_64, align 4)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LOAD]](s64), [[C2]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
+ ; CHECK-NEXT: G_BR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.then:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[GV3:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2_64
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[GV3]](p0) :: (store (s64) into @var2_64)
+ ; CHECK-NEXT: G_BR %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.if.then2:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[GV4:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[GV4]](p0) :: (store (s64) into @var1_64)
+ ; CHECK-NEXT: G_BR %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.if.end:
+ ; CHECK-NEXT: [[GV5:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3_64
+ ; CHECK-NEXT: G_STORE [[C]](s64), [[GV5]](p0) :: (store (s64) into @var3_64)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: $x0 = COPY [[C3]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+entry:
+ %0 = load i64, ptr @var1_64, align 4
+ %cmp = icmp eq i64 %0, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store double 1.000000e-02, ptr @var2_64
+ br label %if.then2
+
+if.then2:
+ store double 1.000000e-02, ptr @var1_64
+ br label %if.end
+
+if.end:
+ store double 1.000000e-02, ptr @var3_64
+ ret i64 0
+}
+
+define i64 @f64_imm_cheap(double %a) {
+ ; CHECK-LABEL: name: f64_imm_cheap
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2_64
+ ; CHECK-NEXT: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3_64
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s64) from @var1_64, align 4)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LOAD]](s64), [[C2]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
+ ; CHECK-NEXT: G_BR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.then:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[GV3:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2_64
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; CHECK-NEXT: G_STORE [[C3]](s64), [[GV3]](p0) :: (store (s64) into @var2_64)
+ ; CHECK-NEXT: G_BR %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.if.then2:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; CHECK-NEXT: [[GV4:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var1_64
+ ; CHECK-NEXT: G_STORE [[C4]](s64), [[GV4]](p0) :: (store (s64) into @var1_64)
+ ; CHECK-NEXT: G_BR %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.if.end:
+ ; CHECK-NEXT: [[GV5:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var3_64
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; CHECK-NEXT: G_STORE [[C5]](s64), [[GV5]](p0) :: (store (s64) into @var3_64)
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: $x0 = COPY [[C6]](s64)
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
+entry:
+ %0 = load i64, ptr @var1_64, align 4
+ %cmp = icmp eq i64 %0, 1
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ store double 0.0, ptr @var2_64
+ br label %if.then2
+
+if.then2:
+ store double 0.0, ptr @var1_64
+ br label %if.end
+
+if.end:
+ store double 0.0, ptr @var3_64
+ ret i64 0
+}
+
@var1_64 = common global i64 0, align 4
@var2_64 = common global i64 0, align 4
@var3_64 = common global i64 0, align 4
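To experiment with these heuristics directly, the MIR in the CHECK lines above
can be regenerated by stopping the pipeline right after the localizer. The
test's actual RUN line is not part of this diff, so the flags below are an
assumption rather than a quote, but an invocation along these lines works:

  llc -mtriple=aarch64 -global-isel -stop-after=localizer -verify-machineinstrs -o - localizer-arm64-tti.ll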