[clang] b1bc723 - [Clang] refactor CodeGenFunction::EmitAsmStmt NFC
Nick Desaulniers via cfe-commits
cfe-commits at lists.llvm.org
Thu Feb 16 18:04:04 PST 2023
Author: Nick Desaulniers
Date: 2023-02-16T17:58:34-08:00
New Revision: b1bc723dfe9734a8b3157dbf50328b5d62436bd6
URL: https://github.com/llvm/llvm-project/commit/b1bc723dfe9734a8b3157dbf50328b5d62436bd6
DIFF: https://github.com/llvm/llvm-project/commit/b1bc723dfe9734a8b3157dbf50328b5d62436bd6.diff
LOG: [Clang] refactor CodeGenFunction::EmitAsmStmt NFC
Prerequisite to further modifications in D136497.
Basically, there is a large body of code in CodeGenFunction::EmitAsmStmt
for emitting stores of outputs. We want to be able to repeat this logic,
for each destination of a callbr (rather than just the default
destination which is what the code currently does).
Also performs some smaller cleanups, such as whitespace fixes and the removal
of pointless casts.
Reviewed By: void, jyknight
Differential Revision: https://reviews.llvm.org/D137113
Added:
Modified:
clang/lib/CodeGen/CGStmt.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 248ffb5440147..dfe5abcfe2761 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -24,6 +24,7 @@
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
@@ -2327,6 +2328,93 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
}
}
+static void
+EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
+ const llvm::ArrayRef<llvm::Value *> RegResults,
+ const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
+ const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
+ const llvm::ArrayRef<LValue> ResultRegDests,
+ const llvm::ArrayRef<QualType> ResultRegQualTys,
+ const llvm::BitVector &ResultTypeRequiresCast,
+ const llvm::BitVector &ResultRegIsFlagReg) {
+ CGBuilderTy &Builder = CGF.Builder;
+ CodeGenModule &CGM = CGF.CGM;
+ llvm::LLVMContext &CTX = CGF.getLLVMContext();
+
+ assert(RegResults.size() == ResultRegTypes.size());
+ assert(RegResults.size() == ResultTruncRegTypes.size());
+ assert(RegResults.size() == ResultRegDests.size());
+ // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
+ // in which case its size may grow.
+ assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
+ assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+ // Target must guarantee the Value `Tmp` here is lowered to a boolean
+ // value.
+ llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+ llvm::Value *IsBooleanValue =
+ Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+ llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
+ Builder.CreateCall(FnAssume, IsBooleanValue);
+ }
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != TruncTy) {
+
+ // Truncate the integer result to the right size, note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =
+ CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ LValue Dest = ResultRegDests[i];
+ // ResultTypeRequiresCast elements correspond to the first
+ // ResultTypeRequiresCast.size() elements of RegResults.
+ if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
+ unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
+ Address A =
+ Builder.CreateElementBitCast(Dest.getAddress(CGF), ResultRegTypes[i]);
+ if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
+ Builder.CreateStore(Tmp, A);
+ continue;
+ }
+
+ QualType Ty =
+ CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
+ if (Ty.isNull()) {
+ const Expr *OutExpr = S.getOutputExpr(i);
+ CGM.getDiags().Report(OutExpr->getExprLoc(),
+ diag::err_store_value_to_reg);
+ return;
+ }
+ Dest = CGF.MakeAddrLValue(A, Ty);
+ }
+ CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
+ }
+}
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Pop all cleanup blocks at the end of the asm statement.
CodeGenFunction::RunCleanupsScope Cleanups(*this);
@@ -2627,7 +2715,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
bool IsGCCAsmGoto = false;
- if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
+ if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
IsGCCAsmGoto = GS->isAsmGoto();
if (IsGCCAsmGoto) {
for (const auto *E : GS->labels()) {
@@ -2725,9 +2813,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::CallBrInst *Result =
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
EmitBlock(Fallthrough);
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, ArgElemTypes, *this, RegResults);
+ UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
@@ -2736,80 +2824,14 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, ArgElemTypes, *this, RegResults);
+ UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
}
- assert(RegResults.size() == ResultRegTypes.size());
- assert(RegResults.size() == ResultTruncRegTypes.size());
- assert(RegResults.size() == ResultRegDests.size());
- // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
- // in which case its size may grow.
- assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
- assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
- for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
- llvm::Value *Tmp = RegResults[i];
- llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
- if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
- // Target must guarantee the Value `Tmp` here is lowered to a boolean
- // value.
- llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
- llvm::Value *IsBooleanValue =
- Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
- llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
- Builder.CreateCall(FnAssume, IsBooleanValue);
- }
-
- // If the result type of the LLVM IR asm doesn't match the result type of
- // the expression, do the conversion.
- if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
-
- // Truncate the integer result to the right size, note that TruncTy can be
- // a pointer.
- if (TruncTy->isFloatingPointTy())
- Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
- else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
- uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
- Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
- } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
- uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
- Tmp = Builder.CreateTrunc(Tmp, TruncTy);
- } else if (TruncTy->isIntegerTy()) {
- Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
- } else if (TruncTy->isVectorTy()) {
- Tmp = Builder.CreateBitCast(Tmp, TruncTy);
- }
- }
-
- LValue Dest = ResultRegDests[i];
- // ResultTypeRequiresCast elements correspond to the first
- // ResultTypeRequiresCast.size() elements of RegResults.
- if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
- unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
- ResultRegTypes[i]);
- if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
- Builder.CreateStore(Tmp, A);
- continue;
- }
-
- QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
- if (Ty.isNull()) {
- const Expr *OutExpr = S.getOutputExpr(i);
- CGM.getDiags().Report(OutExpr->getExprLoc(),
- diag::err_store_value_to_reg);
- return;
- }
- Dest = MakeAddrLValue(A, Ty);
- }
- EmitStoreThroughLValue(RValue::get(Tmp), Dest);
- }
+ EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+ ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+ ResultRegIsFlagReg);
}
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
More information about the cfe-commits
mailing list