[llvm] r297875 - [GlobalISel] Avoid translating synthetic constants to new G_CONSTANTS.
Ahmed Bougacha via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 15 12:21:11 PDT 2017
Author: ab
Date: Wed Mar 15 14:21:11 2017
New Revision: 297875
URL: http://llvm.org/viewvc/llvm-project?rev=297875&view=rev
Log:
[GlobalISel] Avoid translating synthetic constants to new G_CONSTANTS.
Currently, we create a G_CONSTANT for every "synthetic" integer
constant operand (for instance, for the G_GEP offset).
Instead, reuse any G_CONSTANT we may already have created for the same
value, by going through the ValueToVReg machinery.
When we're emitting synthetic constants, we do need to get Constants from
the context. One could argue that we shouldn't modify the context at
all (for instance, it means we use a tad more memory if the constant
isn't used anywhere else), but constants are mostly harmless, and we
already do this for extractvalue and others.
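For reference, this is the shape of the change for the G_GEP offset (lifted
from the diff below); the second form goes through getOrCreateVReg, so
repeated uses of the same offset value share a single G_CONSTANT emitted in
the entry block:

  // Before: a fresh generic vreg plus a new G_CONSTANT at each use.
  unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
  MIRBuilder.buildConstant(OffsetReg, Offset);

  // After: materialize (or reuse) the IR-level ConstantInt via ValueToVReg.
  unsigned OffsetReg =
      getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));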
For constant fcmp, this does mean we'll emit an extra COPY, which isn't
necessarily better than an extra materialized constant.
But that preserves the current intended design of uniqued G_CONSTANTs,
and the rematerialization problem exists elsewhere and should be
resolved with a single coherent solution.
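To illustrate why going through the ValueToVReg machinery deduplicates
constants, here is a rough sketch of the lookup, assuming a DenseMap cache
and an entry-block builder (the name getOrCreateVRegSketch and the exact
shape are illustrative only; the real getOrCreateVReg in IRTranslator.cpp
handles more cases):

  unsigned IRTranslator::getOrCreateVRegSketch(const Value &Val) {
    unsigned &VReg = ValToVReg[&Val];   // one slot per IR Value
    if (VReg)
      return VReg;                      // already translated: reuse the vreg
    VReg =
        MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
    if (auto *CI = dyn_cast<ConstantInt>(&Val))
      EntryBuilder.buildConstant(VReg, *CI); // constants land in the entry block
    return VReg;
  }

Because every request for the same ConstantInt resolves to the same cached
vreg, synthetic constants requested via ConstantInt::get() share the
G_CONSTANT with any identical user-written constant.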
Modified:
llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
Modified: llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp?rev=297875&r1=297874&r2=297875&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp Wed Mar 15 14:21:11 2017
@@ -190,9 +190,11 @@ bool IRTranslator::translateCompare(cons
if (CmpInst::isIntPredicate(Pred))
MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else if (Pred == CmpInst::FCMP_FALSE)
- MIRBuilder.buildConstant(Res, 0);
- else if (Pred == CmpInst::FCMP_TRUE)
- MIRBuilder.buildConstant(Res, 1);
+ MIRBuilder.buildCopy(
+ Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
+ else if (Pred == CmpInst::FCMP_TRUE)
+ MIRBuilder.buildCopy(
+ Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
else
MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
@@ -426,9 +428,10 @@ bool IRTranslator::translateGetElementPt
Value &Op0 = *U.getOperand(0);
unsigned BaseReg = getOrCreateVReg(Op0);
- LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
- unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
- LLT OffsetTy = LLT::scalar(PtrSize);
+ Type *PtrIRTy = Op0.getType();
+ LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
+ Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
+ LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
int64_t Offset = 0;
for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
@@ -450,8 +453,8 @@ bool IRTranslator::translateGetElementPt
if (Offset != 0) {
unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
- unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(OffsetReg, Offset);
+ unsigned OffsetReg =
+ getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
BaseReg = NewBaseReg;
@@ -459,8 +462,8 @@ bool IRTranslator::translateGetElementPt
}
// N = N + Idx * ElementSize;
- unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(ElementSizeReg, ElementSize);
+ unsigned ElementSizeReg =
+ getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));
unsigned IdxReg = getOrCreateVReg(*Idx);
if (MRI->getType(IdxReg) != OffsetTy) {
@@ -479,8 +482,7 @@ bool IRTranslator::translateGetElementPt
}
if (Offset != 0) {
- unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildConstant(OffsetReg, Offset);
+ unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
return true;
}
@@ -561,8 +563,8 @@ bool IRTranslator::translateOverflowIntr
.addUse(getOrCreateVReg(*CI.getOperand(1)));
if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
- unsigned Zero = MRI->createGenericVirtualRegister(s1);
- EntryBuilder.buildConstant(Zero, 0);
+ unsigned Zero = getOrCreateVReg(
+ *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
MIB.addUse(Zero);
}
@@ -914,7 +916,8 @@ bool IRTranslator::translateAlloca(const
unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
- LLT IntPtrTy = LLT::scalar(DL->getPointerSizeInBits());
+ Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
+ LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
if (MRI->getType(NumElts) != IntPtrTy) {
unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
@@ -922,8 +925,8 @@ bool IRTranslator::translateAlloca(const
}
unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
- unsigned TySize = MRI->createGenericVirtualRegister(IntPtrTy);
- MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
+ unsigned TySize =
+ getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
LLT PtrTy = getLLTForType(*AI.getType(), *DL);
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll?rev=297875&r1=297874&r2=297875&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll Wed Mar 15 14:21:11 2017
@@ -1064,8 +1064,10 @@ define void @float_comparison(float* %a.
}
; CHECK-LABEL: name: trivial_float_comparison
-; CHECK: [[R1:%[0-9]+]](s1) = G_CONSTANT i1 false
-; CHECK: [[R2:%[0-9]+]](s1) = G_CONSTANT i1 true
+; CHECK: [[ENTRY_R1:%[0-9]+]](s1) = G_CONSTANT i1 false
+; CHECK: [[ENTRY_R2:%[0-9]+]](s1) = G_CONSTANT i1 true
+; CHECK: [[R1:%[0-9]+]](s1) = COPY [[ENTRY_R1]](s1)
+; CHECK: [[R2:%[0-9]+]](s1) = COPY [[ENTRY_R2]](s1)
; CHECK: G_ADD [[R1]], [[R2]]
define i1 @trivial_float_comparison(double %a, double %b) {
%r1 = fcmp false double %a, %b
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll?rev=297875&r1=297874&r2=297875&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll Wed Mar 15 14:21:11 2017
@@ -2,8 +2,8 @@
; CHECK-LABEL: name: test_simple_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
@@ -18,8 +18,8 @@ define i8* @test_simple_alloca(i32 %nume
; CHECK-LABEL: name: test_aligned_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -1
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
@@ -34,8 +34,8 @@ define i8* @test_aligned_alloca(i32 %num
; CHECK-LABEL: name: test_natural_alloca
; CHECK: [[NUMELTS:%[0-9]+]](s32) = COPY %w0
-; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[TYPE_SIZE:%[0-9]+]](s64) = G_CONSTANT i64 -16
+; CHECK: [[NUMELTS_64:%[0-9]+]](s64) = G_ZEXT [[NUMELTS]](s32)
; CHECK: [[NUMBYTES:%[0-9]+]](s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
; CHECK: [[SP_TMP:%[0-9]+]](p0) = COPY %sp
; CHECK: [[ALLOC:%[0-9]+]](p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll?rev=297875&r1=297874&r2=297875&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll Wed Mar 15 14:21:11 2017
@@ -58,8 +58,8 @@ define i32* @const_then_var(%type1* %add
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 272
-; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 4
+; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[BASE2]](p0)
@@ -74,9 +74,9 @@ define i32* @var_then_const(%type1* %add
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT i64 64
+; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
-; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT i64 40
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: %x0 = COPY [[BASE2]](p0)