[clang] 4b2cb11 - [CIR] Upstream lowering of conditional operators to TernaryOp (#138156)
via cfe-commits
cfe-commits at lists.llvm.org
Tue Jun 3 04:29:27 PDT 2025
Author: Morris Hafner
Date: 2025-06-03T13:29:23+02:00
New Revision: 4b2cb118bc5825c309724d536053c6f9817e2eb9
URL: https://github.com/llvm/llvm-project/commit/4b2cb118bc5825c309724d536053c6f9817e2eb9
DIFF: https://github.com/llvm/llvm-project/commit/4b2cb118bc5825c309724d536053c6f9817e2eb9.diff
LOG: [CIR] Upstream lowering of conditional operators to TernaryOp (#138156)
This patch adds visitors for BinLAnd, BinLOr, and
AbstractConditionalOperator. Note that this patch still lacks visitation
of OpaqueValueExpr, which is needed for the GNU ?: operator.
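For readers skimming the patch, a minimal sketch of the source-level forms the
new visitors lower (illustrative only, not part of the commit; the function
name is hypothetical), mirroring the added binop.c, binop.cpp and ternary.cpp
tests:

// Hypothetical illustration, not part of the patch.
int conditional_forms(bool a, bool b, int n) {
  bool x = a && b;    // VisitBinLAnd: lowered to cir.ternary
  bool y = a || b;    // VisitBinLOr: lowered to cir.ternary
  int z = a ? 3 : 5;  // VisitAbstractConditionalOperator: cheap, side-effect-free
                      // arms are emitted as cir.select instead of control flow
  // int w = n ?: 7;  // GNU ?: (BinaryConditionalOperator) still NYI: it needs
                      // OpaqueValueExpr visitation.
  return x + y + z;
}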
---------
Co-authored-by: Erich Keane <ekeane at nvidia.com>
Added:
clang/test/CIR/CodeGen/binop.c
clang/test/CIR/CodeGen/ternary.cpp
Modified:
clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
clang/include/clang/CIR/MissingFeatures.h
clang/lib/CIR/CodeGen/CIRGenDecl.cpp
clang/lib/CIR/CodeGen/CIRGenExpr.cpp
clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
clang/lib/CIR/CodeGen/CIRGenFunction.h
clang/lib/CIR/CodeGen/CIRGenValue.h
clang/test/CIR/CodeGen/binop.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index e9d8a2baedf2f..5cd0caa823ca1 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -300,6 +300,24 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
return createBinop(loc, lhs, cir::BinOpKind::Or, rhs);
}
+ mlir::Value createSelect(mlir::Location loc, mlir::Value condition,
+ mlir::Value trueValue, mlir::Value falseValue) {
+ assert(trueValue.getType() == falseValue.getType() &&
+ "trueValue and falseValue should have the same type");
+ return create<cir::SelectOp>(loc, trueValue.getType(), condition, trueValue,
+ falseValue);
+ }
+
+ mlir::Value createLogicalAnd(mlir::Location loc, mlir::Value lhs,
+ mlir::Value rhs) {
+ return createSelect(loc, lhs, rhs, getBool(false, loc));
+ }
+
+ mlir::Value createLogicalOr(mlir::Location loc, mlir::Value lhs,
+ mlir::Value rhs) {
+ return createSelect(loc, lhs, getBool(true, loc), rhs);
+ }
+
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
OverflowBehavior ob = OverflowBehavior::None) {
auto op =
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 65978e51a23e9..2a7cd464b8f6b 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -208,6 +208,8 @@ struct MissingFeatures {
static bool deferredDecls() { return false; }
static bool setTargetAttributes() { return false; }
static bool coverageMapping() { return false; }
+ static bool peepholeProtection() { return false; }
+ static bool instrumentation() { return false; }
// Missing types
static bool dataMemberType() { return false; }
@@ -232,8 +234,9 @@ struct MissingFeatures {
static bool ptrDiffOp() { return false; }
static bool ptrStrideOp() { return false; }
static bool switchOp() { return false; }
- static bool ternaryOp() { return false; }
+ static bool throwOp() { return false; }
static bool tryOp() { return false; }
+ static bool vecTernaryOp() { return false; }
static bool zextOp() { return false; }
// Future CIR attributes
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 61af33053dc0a..80b0172090aa3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -50,8 +50,7 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d) {
// A normal fixed sized variable becomes an alloca in the entry block,
mlir::Type allocaTy = convertTypeForMem(ty);
// Create the temp alloca and declare variable using it.
- address = createTempAlloca(allocaTy, alignment, loc, d.getName(),
- /*insertIntoFnEntryBlock=*/false);
+ address = createTempAlloca(allocaTy, alignment, loc, d.getName());
declare(address.getPointer(), &d, ty, getLoc(d.getSourceRange()), alignment);
emission.Addr = address;
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 5424c6a8d6f3c..1175fdc0be2cf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -15,6 +15,7 @@
#include "CIRGenModule.h"
#include "CIRGenValue.h"
#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Value.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -22,6 +23,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/MissingFeatures.h"
+#include <optional>
using namespace clang;
using namespace clang::CIRGen;
@@ -229,7 +231,7 @@ void CIRGenFunction::emitStoreThroughLValue(RValue src, LValue dst,
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
const VarDecl *vd) {
- QualType T = e->getType();
+ QualType t = e->getType();
// If it's thread_local, emit a call to its wrapper function instead.
assert(!cir::MissingFeatures::opGlobalThreadLocal());
@@ -259,7 +261,7 @@ static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
cgf.cgm.errorNYI(e->getSourceRange(),
"emitGlobalVarDeclLValue: reference type");
else
- lv = cgf.makeAddrLValue(addr, T, AlignmentSource::Decl);
+ lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
assert(!cir::MissingFeatures::setObjCGCLValueClass());
return lv;
}
@@ -1259,10 +1261,28 @@ mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
// cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
assert(!cir::MissingFeatures::shouldReverseUnaryCondOnBoolExpr());
- if (isa<ConditionalOperator>(cond)) {
- cgm.errorNYI(cond->getExprLoc(), "Ternary NYI");
- assert(!cir::MissingFeatures::ternaryOp());
- return createDummyValue(loc, cond->getType());
+ if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
+ Expr *trueExpr = condOp->getTrueExpr();
+ Expr *falseExpr = condOp->getFalseExpr();
+ mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
+
+ mlir::Value ternaryOpRes =
+ builder
+ .create<cir::TernaryOp>(
+ loc, condV, /*thenBuilder=*/
+ [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
+ mlir::Value lhs = emitScalarExpr(trueExpr);
+ b.create<cir::YieldOp>(loc, lhs);
+ },
+ /*elseBuilder=*/
+ [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
+ mlir::Value rhs = emitScalarExpr(falseExpr);
+ b.create<cir::YieldOp>(loc, rhs);
+ })
+ .getResult();
+
+ return emitScalarConversion(ternaryOpRes, condOp->getType(),
+ getContext().BoolTy, condOp->getExprLoc());
}
if (isa<CXXThrowExpr>(cond)) {
@@ -1394,13 +1414,84 @@ mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
return builder.createDummyValue(loc, t, alignment);
}
-/// This creates an alloca and inserts it into the entry block if
-/// \p insertIntoFnEntryBlock is true, otherwise it inserts it at the current
-/// insertion point of the builder.
+//===----------------------------------------------------------------------===//
+// CIR builder helpers
+//===----------------------------------------------------------------------===//
+
+Address CIRGenFunction::createMemTemp(QualType ty, mlir::Location loc,
+ const Twine &name, Address *alloca,
+ mlir::OpBuilder::InsertPoint ip) {
+ // FIXME: Should we prefer the preferred type alignment here?
+ return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
+ alloca, ip);
+}
+
+Address CIRGenFunction::createMemTemp(QualType ty, CharUnits align,
+ mlir::Location loc, const Twine &name,
+ Address *alloca,
+ mlir::OpBuilder::InsertPoint ip) {
+ Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
+ /*ArraySize=*/nullptr, alloca, ip);
+ if (ty->isConstantMatrixType()) {
+ assert(!cir::MissingFeatures::matrixType());
+ cgm.errorNYI(loc, "temporary matrix value");
+ }
+ return result;
+}
+
+/// This creates an alloca and inserts it into the entry block of the
+/// current region.
+Address CIRGenFunction::createTempAllocaWithoutCast(
+ mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
+ mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
+ cir::AllocaOp alloca = ip.isSet()
+ ? createTempAlloca(ty, loc, name, ip, arraySize)
+ : createTempAlloca(ty, loc, name, arraySize);
+ alloca.setAlignmentAttr(cgm.getSize(align));
+ return Address(alloca, ty, align);
+}
+
+/// This creates an alloca and inserts it into the entry block. The alloca is
+/// cast to the default address space if necessary.
Address CIRGenFunction::createTempAlloca(mlir::Type ty, CharUnits align,
mlir::Location loc, const Twine &name,
- bool insertIntoFnEntryBlock) {
- mlir::Value alloca =
- emitAlloca(name.str(), ty, loc, align, insertIntoFnEntryBlock);
- return Address(alloca, ty, align);
+ mlir::Value arraySize,
+ Address *allocaAddr,
+ mlir::OpBuilder::InsertPoint ip) {
+ Address alloca =
+ createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
+ if (allocaAddr)
+ *allocaAddr = alloca;
+ mlir::Value v = alloca.getPointer();
+ // Alloca always returns a pointer in alloca address space, which may
+ // be different from the type defined by the language. For example,
+ // in C++ the auto variables are in the default address space. Therefore
+ // cast alloca to the default address space when necessary.
+ assert(!cir::MissingFeatures::addressSpace());
+ return Address(v, ty, align);
+}
+
+/// This creates an alloca and inserts it into the entry block if \p ArraySize
+/// is nullptr, otherwise inserts it at the current insertion point of the
+/// builder.
+cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
+ mlir::Location loc,
+ const Twine &name,
+ mlir::Value arraySize,
+ bool insertIntoFnEntryBlock) {
+ return cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
+ insertIntoFnEntryBlock, arraySize)
+ .getDefiningOp());
+}
+
+/// This creates an alloca and inserts it into the provided insertion point
+cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
+ mlir::Location loc,
+ const Twine &name,
+ mlir::OpBuilder::InsertPoint ip,
+ mlir::Value arraySize) {
+ assert(ip.isSet() && "Insertion point is not set");
+ return cast<cir::AllocaOp>(
+ emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
+ .getDefiningOp());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index b375bcf2f483f..56d7ea3884ba7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -155,8 +155,7 @@ void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
// Allocate the temporary variable
// to store the pointer to first unitialized element
const Address tmpAddr = cgf.createTempAlloca(
- cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp",
- /*insertIntoFnEntryBlock=*/false);
+ cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);
@@ -274,3 +273,11 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
}
+
+LValue CIRGenFunction::emitAggExprToLValue(const Expr *e) {
+ assert(hasAggregateEvaluationKind(e->getType()) && "Invalid argument!");
+ Address temp = createMemTemp(e->getType(), getLoc(e->getSourceRange()));
+ LValue lv = makeAddrLValue(temp, e->getType());
+ emitAggExpr(e, AggValueSlot::forLValue(lv));
+ return lv;
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 36d52251a0106..77287ec45972d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -98,6 +98,14 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
mlir::Value emitPromoted(const Expr *e, QualType promotionType);
+ mlir::Value maybePromoteBoolResult(mlir::Value value,
+ mlir::Type dstTy) const {
+ if (mlir::isa<cir::IntType>(dstTy))
+ return builder.createBoolToInt(value, dstTy);
+ if (mlir::isa<cir::BoolType>(dstTy))
+ return value;
+ }
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
@@ -334,6 +342,8 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
}
mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
+ mlir::Value
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
// Unary Operators.
mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
@@ -934,6 +944,71 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
// NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
return Visit(e->getRHS());
}
+
+ mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
+ if (e->getType()->isVectorType()) {
+ assert(!cir::MissingFeatures::vectorType());
+ return {};
+ }
+
+ assert(!cir::MissingFeatures::instrumentation());
+ mlir::Type resTy = cgf.convertType(e->getType());
+ mlir::Location loc = cgf.getLoc(e->getExprLoc());
+
+ CIRGenFunction::ConditionalEvaluation eval(cgf);
+
+ mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
+ auto resOp = builder.create<cir::TernaryOp>(
+ loc, lhsCondV, /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc,
+ b.getInsertionBlock()};
+ cgf.curLexScope->setAsTernary();
+ b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
+ },
+ /*falseBuilder*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc,
+ b.getInsertionBlock()};
+ cgf.curLexScope->setAsTernary();
+ auto res = b.create<cir::ConstantOp>(loc, builder.getFalseAttr());
+ b.create<cir::YieldOp>(loc, res.getRes());
+ });
+ return maybePromoteBoolResult(resOp.getResult(), resTy);
+ }
+
+ mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
+ if (e->getType()->isVectorType()) {
+ assert(!cir::MissingFeatures::vectorType());
+ return {};
+ }
+
+ assert(!cir::MissingFeatures::instrumentation());
+ mlir::Type resTy = cgf.convertType(e->getType());
+ mlir::Location loc = cgf.getLoc(e->getExprLoc());
+
+ CIRGenFunction::ConditionalEvaluation eval(cgf);
+
+ mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
+ auto resOp = builder.create<cir::TernaryOp>(
+ loc, lhsCondV, /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc,
+ b.getInsertionBlock()};
+ cgf.curLexScope->setAsTernary();
+ auto res = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
+ b.create<cir::YieldOp>(loc, res.getRes());
+ },
+ /*falseBuilder*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc,
+ b.getInsertionBlock()};
+ cgf.curLexScope->setAsTernary();
+ b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
+ });
+
+ return maybePromoteBoolResult(resOp.getResult(), resTy);
+ }
};
LValue ScalarExprEmitter::emitCompoundAssignLValue(
@@ -1781,11 +1856,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
boolVal = builder.createNot(boolVal);
// ZExt result to the expr type.
- mlir::Type dstTy = cgf.convertType(e->getType());
- if (mlir::isa<cir::IntType>(dstTy))
- return builder.createBoolToInt(boolVal, dstTy);
- if (mlir::isa<cir::BoolType>(dstTy))
- return boolVal;
+ return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
cgf.cgm.errorNYI("destination type for logical-not unary operator is NYI");
return {};
@@ -1828,6 +1899,162 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
cgf.cgm.UInt64Ty, e->EvaluateKnownConstInt(cgf.getContext())));
}
+/// Return true if the specified expression is cheap enough and side-effect-free
+/// enough to evaluate unconditionally instead of conditionally. This is used
+/// to convert control flow into selects in some cases.
+/// TODO(cir): can be shared with LLVM codegen.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
+ CIRGenFunction &cgf) {
+ // Anything that is an integer or floating point constant is fine.
+ return e->IgnoreParens()->isEvaluatable(cgf.getContext());
+
+ // Even non-volatile automatic variables can't be evaluated unconditionally.
+ // Referencing a thread_local may cause non-trivial initialization work to
+ // occur. If we're inside a lambda and one of the variables is from the scope
+ // outside the lambda, that function may have returned already. Reading its
+ // locals is a bad idea. Also, these reads may introduce races there didn't
+ // exist in the source-level program.
+}
+
+mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
+ const AbstractConditionalOperator *e) {
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ mlir::Location loc = cgf.getLoc(e->getSourceRange());
+ ignoreResultAssign = false;
+
+ // Bind the common expression if necessary.
+ CIRGenFunction::OpaqueValueMapping binding(cgf, e);
+
+ Expr *condExpr = e->getCond();
+ Expr *lhsExpr = e->getTrueExpr();
+ Expr *rhsExpr = e->getFalseExpr();
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ bool condExprBool;
+ if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (!condExprBool)
+ std::swap(live, dead);
+
+ // If the dead side doesn't have labels we need, just emit the Live part.
+ if (!cgf.containsLabel(dead)) {
+ if (condExprBool)
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ mlir::Value result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value. However, a conditional
+ // with non-void type must return a non-null Value.
+ if (!result && !e->getType()->isVoidType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "throw expression in conditional operator");
+ result = {};
+ }
+
+ return result;
+ }
+ }
+
+ // OpenCL: If the condition is a vector, we can treat this condition like
+ // the select function.
+ if ((cgf.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
+ condExpr->getType()->isExtVectorType()) {
+ assert(!cir::MissingFeatures::vectorType());
+ cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
+ }
+
+ if (condExpr->getType()->isVectorType() ||
+ condExpr->getType()->isSveVLSBuiltinType()) {
+ assert(!cir::MissingFeatures::vecTernaryOp());
+ cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
+ return {};
+ }
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap
+ // and safe to evaluate the LHS and RHS unconditionally.
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
+ bool lhsIsVoid = false;
+ mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+
+ mlir::Value lhs = Visit(lhsExpr);
+ if (!lhs) {
+ lhs = builder.getNullValue(cgf.VoidTy, loc);
+ lhsIsVoid = true;
+ }
+
+ mlir::Value rhs = Visit(rhsExpr);
+ if (lhsIsVoid) {
+ assert(!rhs && "lhs and rhs types must match");
+ rhs = builder.getNullValue(cgf.VoidTy, loc);
+ }
+
+ return builder.createSelect(loc, condV, lhs, rhs);
+ }
+
+ mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
+ CIRGenFunction::ConditionalEvaluation eval(cgf);
+ SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
+ mlir::Type yieldTy{};
+
+ auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
+ CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
+ cgf.curLexScope->setAsTernary();
+
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ eval.beginEvaluation();
+ mlir::Value branch = Visit(expr);
+ eval.endEvaluation();
+
+ if (branch) {
+ yieldTy = branch.getType();
+ b.create<cir::YieldOp>(loc, branch);
+ } else {
+ // If LHS or RHS is a throw or void expression we need to patch the
+ // arms so as to properly match yield types.
+ insertPoints.push_back(b.saveInsertionPoint());
+ }
+ };
+
+ mlir::Value result = builder
+ .create<cir::TernaryOp>(
+ loc, condV,
+ /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, lhsExpr);
+ },
+ /*falseBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, rhsExpr);
+ })
+ .getResult();
+
+ if (!insertPoints.empty()) {
+ // If both arms are void, so be it.
+ if (!yieldTy)
+ yieldTy = cgf.VoidTy;
+
+ // Insert required yields.
+ for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(toInsert);
+
+ // Block does not return: build empty yield.
+ if (mlir::isa<cir::VoidType>(yieldTy)) {
+ builder.create<cir::YieldOp>(loc);
+ } else { // Block returns: set null yield value.
+ mlir::Value op0 = builder.getNullValue(yieldTy, loc);
+ builder.create<cir::YieldOp>(loc, op0);
+ }
+ }
+ }
+
+ return result;
+}
+
mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
LValue lv, bool isInc,
bool isPre) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index ef61aa7f4ee6d..ee014adc961be 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -118,6 +118,144 @@ class CIRGenFunction : public CIRGenTypeCache {
const TargetInfo &getTarget() const { return cgm.getTarget(); }
mlir::MLIRContext &getMLIRContext() { return cgm.getMLIRContext(); }
+ // ---------------------
+ // Opaque value handling
+ // ---------------------
+
+ /// Keeps track of the current set of opaque value expressions.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> opaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> opaqueRValues;
+
+public:
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
+ const OpaqueValueExpr *opaqueValue;
+ bool boundLValue;
+
+ OpaqueValueMappingData(const OpaqueValueExpr *ov, bool boundLValue)
+ : opaqueValue(ov), boundLValue(boundLValue) {}
+
+ public:
+ OpaqueValueMappingData() : opaqueValue(nullptr) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() || expr->getType()->isFunctionType() ||
+ hasAggregateEvaluationKind(expr->getType());
+ }
+
+ static OpaqueValueMappingData
+ bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(cgf, ov, cgf.emitLValue(e));
+ return bind(cgf, ov, cgf.emitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData
+ bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ cgf.opaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData
+ bind(CIRGenFunction &cgf, const OpaqueValueExpr *ov, const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ cgf.opaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
+ return data;
+ }
+
+ bool isValid() const { return opaqueValue != nullptr; }
+ void clear() { opaqueValue = nullptr; }
+
+ void unbind(CIRGenFunction &cgf) {
+ assert(opaqueValue && "no data to unbind!");
+
+ if (boundLValue) {
+ cgf.opaqueLValues.erase(opaqueValue);
+ } else {
+ cgf.opaqueRValues.erase(opaqueValue);
+ assert(!cir::MissingFeatures::peepholeProtection() && "NYI");
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CIRGenFunction &cgf;
+ OpaqueValueMappingData data;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+ /// enough pattern that the convenience operator is really
+ /// helpful.
+ ///
+ OpaqueValueMapping(CIRGenFunction &cgf,
+ const AbstractConditionalOperator *op)
+ : cgf(cgf) {
+ if (mlir::isa<ConditionalOperator>(op))
+ // Leave Data empty.
+ return;
+
+ const BinaryConditionalOperator *e =
+ mlir::cast<BinaryConditionalOperator>(op);
+ data = OpaqueValueMappingData::bind(cgf, e->getOpaqueValue(),
+ e->getCommon());
+ }
+
+ /// Build the opaque value mapping for an OpaqueValueExpr whose source
+ /// expression is set to the expression the OVE represents.
+ OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *ov)
+ : cgf(cgf) {
+ if (ov) {
+ assert(ov->getSourceExpr() && "wrong form of OpaqueValueMapping used "
+ "for OVE with no source expression");
+ data = OpaqueValueMappingData::bind(cgf, ov, ov->getSourceExpr());
+ }
+ }
+
+ OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : cgf(cgf),
+ data(OpaqueValueMappingData::bind(cgf, opaqueValue, lvalue)) {}
+
+ OpaqueValueMapping(CIRGenFunction &cgf, const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : cgf(cgf),
+ data(OpaqueValueMappingData::bind(cgf, opaqueValue, rvalue)) {}
+
+ void pop() {
+ data.unbind(cgf);
+ data.clear();
+ }
+
+ ~OpaqueValueMapping() {
+ if (data.isValid())
+ data.unbind(cgf);
+ }
+ };
+
private:
/// Declare a variable in the current scope, return success if the variable
/// wasn't declared yet.
@@ -272,7 +410,7 @@ class CIRGenFunction : public CIRGenTypeCache {
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
- Address getObjectAddress(CIRGenFunction &CGF) const {
+ Address getObjectAddress(CIRGenFunction &cgf) const {
if (!IsEscapingByRef)
return Addr;
@@ -501,6 +639,8 @@ class CIRGenFunction : public CIRGenTypeCache {
void emitAggExpr(const clang::Expr *e, AggValueSlot slot);
+ LValue emitAggExprToLValue(const Expr *e);
+
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
@@ -747,12 +887,101 @@ class CIRGenFunction : public CIRGenTypeCache {
void emitNullabilityCheck(LValue lhs, mlir::Value rhs,
clang::SourceLocation loc);
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ CIRGenFunction &cgf;
+ mlir::OpBuilder::InsertPoint insertPt;
+
+ public:
+ ConditionalEvaluation(CIRGenFunction &cgf)
+ : cgf(cgf), insertPt(cgf.builder.saveInsertionPoint()) {}
+ ConditionalEvaluation(CIRGenFunction &cgf, mlir::OpBuilder::InsertPoint ip)
+ : cgf(cgf), insertPt(ip) {}
+
+ void beginEvaluation() {
+ assert(cgf.outermostConditional != this);
+ if (!cgf.outermostConditional)
+ cgf.outermostConditional = this;
+ }
+
+ void endEvaluation() {
+ assert(cgf.outermostConditional != nullptr);
+ if (cgf.outermostConditional == this)
+ cgf.outermostConditional = nullptr;
+ }
+
+ /// Returns the insertion point which will be executed prior to each
+ /// evaluation of the conditional code. In LLVM OG, this method
+ /// is called getStartingBlock.
+ mlir::OpBuilder::InsertPoint getInsertPoint() const { return insertPt; }
+ };
+
+ struct ConditionalInfo {
+ std::optional<LValue> lhs{}, rhs{};
+ mlir::Value result{};
+ };
+
+ // Return true if we're currently emitting one branch or the other of a
+ // conditional expression.
+ bool isInConditionalBranch() const { return outermostConditional != nullptr; }
+
+ void setBeforeOutermostConditional(mlir::Value value, Address addr) {
+ assert(isInConditionalBranch());
+ {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(outermostConditional->getInsertPoint());
+ builder.createStore(
+ value.getLoc(), value, addr,
+ mlir::IntegerAttr::get(
+ mlir::IntegerType::get(value.getContext(), 64),
+ (uint64_t)addr.getAlignment().getAsAlign().value()));
+ }
+ }
+
+ // Points to the outermost active conditional control. This is used so that
+ // we know if a temporary should be destroyed conditionally.
+ ConditionalEvaluation *outermostConditional = nullptr;
+
+ template <typename FuncTy>
+ ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e,
+ const FuncTy &branchGenFunc);
+
+ mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
+ const clang::Stmt *thenS,
+ const clang::Stmt *elseS);
+
/// ----------------------
/// CIR build helpers
/// -----------------
public:
+ cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
+ const Twine &name = "tmp",
+ mlir::Value arraySize = nullptr,
+ bool insertIntoFnEntryBlock = false);
+ cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc,
+ const Twine &name = "tmp",
+ mlir::OpBuilder::InsertPoint ip = {},
+ mlir::Value arraySize = nullptr);
Address createTempAlloca(mlir::Type ty, CharUnits align, mlir::Location loc,
- const Twine &name, bool insertIntoFnEntryBlock);
+ const Twine &name = "tmp",
+ mlir::Value arraySize = nullptr,
+ Address *alloca = nullptr,
+ mlir::OpBuilder::InsertPoint ip = {});
+ Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align,
+ mlir::Location loc,
+ const Twine &name = "tmp",
+ mlir::Value arraySize = nullptr,
+ mlir::OpBuilder::InsertPoint ip = {});
+
+ /// Create a temporary memory object of the given type, with
+ /// appropriate alignment and cast it to the default address space. Returns
+ /// the original alloca instruction by \p Alloca if it is not nullptr.
+ Address createMemTemp(QualType t, mlir::Location loc,
+ const Twine &name = "tmp", Address *alloca = nullptr,
+ mlir::OpBuilder::InsertPoint ip = {});
+ Address createMemTemp(QualType t, CharUnits align, mlir::Location loc,
+ const Twine &name = "tmp", Address *alloca = nullptr,
+ mlir::OpBuilder::InsertPoint ip = {});
//===--------------------------------------------------------------------===//
// OpenACC Emission
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 3feadfaf56354..56177c948df94 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -164,6 +164,7 @@ class LValue {
clang::Qualifiers &getQuals() { return quals; }
LValueBaseInfo getBaseInfo() const { return baseInfo; }
+ void setBaseInfo(LValueBaseInfo info) { baseInfo = info; }
static LValue makeAddr(Address address, clang::QualType t,
LValueBaseInfo baseInfo) {
diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c
new file mode 100644
index 0000000000000..280fd29b067f9
--- /dev/null
+++ b/clang/test/CIR/CodeGen/binop.c
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+void conditionalResultIimplicitCast(int a, int b, float f) {
+ // Should implicit cast back to int.
+ int x = a && b;
+ // CHECK: %[[#INT:]] = cir.ternary
+ // CHECK: %{{.+}} = cir.cast(bool_to_int, %[[#INT]] : !cir.bool), !s32i
+ float y = f && f;
+ // CHECK: %[[#BOOL:]] = cir.ternary
+ // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i
+ // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), !cir.float
+}
diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp
index dbd17fb7ba83d..c728f0d0c1bc1 100644
--- a/clang/test/CIR/CodeGen/binop.cpp
+++ b/clang/test/CIR/CodeGen/binop.cpp
@@ -540,3 +540,235 @@ void long_shift_example(long long a, short b) {
// OGCG: store i64 %[[SHL]], ptr %[[X]]
// OGCG: ret void
+
+void b1(bool a, bool b) {
+ bool x = a && b;
+ x = x || b;
+}
+
+// CIR-LABEL: cir.func @_Z2b1bb(
+// CIR-SAME: %[[ARG0:.*]]: !cir.bool {{.*}}, %[[ARG1:.*]]: !cir.bool {{.*}})
+// CIR: [[A:%[0-9]+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["a", init]
+// CIR: [[B:%[0-9]+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b", init]
+// CIR: [[X:%[0-9]+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["x", init]
+// CIR: cir.store %[[ARG0]], [[A]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: cir.store %[[ARG1]], [[B]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: [[AVAL:%[0-9]+]] = cir.load align(1) [[A]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: [[RES1:%[0-9]+]] = cir.ternary([[AVAL]], true {
+// CIR: [[BVAL:%[0-9]+]] = cir.load align(1) [[B]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: cir.yield [[BVAL]] : !cir.bool
+// CIR: }, false {
+// CIR: [[FALSE:%[0-9]+]] = cir.const #false
+// CIR: cir.yield [[FALSE]] : !cir.bool
+// CIR: }) : (!cir.bool) -> !cir.bool
+// CIR: cir.store align(1) [[RES1]], [[X]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: [[XVAL:%[0-9]+]] = cir.load align(1) [[X]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: [[RES2:%[0-9]+]] = cir.ternary([[XVAL]], true {
+// CIR: [[TRUE:%[0-9]+]] = cir.const #true
+// CIR: cir.yield [[TRUE]] : !cir.bool
+// CIR: }, false {
+// CIR: [[BVAL2:%[0-9]+]] = cir.load align(1) [[B]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: cir.yield [[BVAL2]] : !cir.bool
+// CIR: }) : (!cir.bool) -> !cir.bool
+// CIR: cir.store align(1) [[RES2]], [[X]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: cir.return
+
+
+// LLVM-LABEL: define void @_Z2b1bb(
+// LLVM-SAME: i1 %[[ARG0:.+]], i1 %[[ARG1:.+]])
+// LLVM: %[[A_ADDR:.*]] = alloca i8
+// LLVM: %[[B_ADDR:.*]] = alloca i8
+// LLVM: %[[X:.*]] = alloca i8
+// LLVM: %[[ZEXT0:.*]] = zext i1 %[[ARG0]] to i8
+// LLVM: store i8 %[[ZEXT0]], ptr %[[A_ADDR]]
+// LLVM: %[[ZEXT1:.*]] = zext i1 %[[ARG1]] to i8
+// LLVM: store i8 %[[ZEXT1]], ptr %[[B_ADDR]]
+// LLVM: %[[A_VAL:.*]] = load i8, ptr %[[A_ADDR]]
+// LLVM: %[[A_BOOL:.*]] = trunc i8 %[[A_VAL]] to i1
+// LLVM: br i1 %[[A_BOOL]], label %[[AND_TRUE:.+]], label %[[AND_FALSE:.+]]
+// LLVM: [[AND_TRUE]]:
+// LLVM: %[[B_VAL:.*]] = load i8, ptr %[[B_ADDR]]
+// LLVM: %[[B_BOOL:.*]] = trunc i8 %[[B_VAL]] to i1
+// LLVM: br label %[[AND_MERGE:.+]]
+// LLVM: [[AND_FALSE]]:
+// LLVM: br label %[[AND_MERGE]]
+// LLVM: [[AND_MERGE]]:
+// LLVM: %[[AND_PHI:.*]] = phi i1 [ false, %[[AND_FALSE]] ], [ %[[B_BOOL]], %[[AND_TRUE]] ]
+// LLVM: %[[ZEXT_AND:.*]] = zext i1 %[[AND_PHI]] to i8
+// LLVM: store i8 %[[ZEXT_AND]], ptr %[[X]]
+// LLVM: %[[X_VAL:.*]] = load i8, ptr %[[X]]
+// LLVM: %[[X_BOOL:.*]] = trunc i8 %[[X_VAL]] to i1
+// LLVM: br i1 %[[X_BOOL]], label %[[OR_TRUE:.+]], label %[[OR_FALSE:.+]]
+// LLVM: [[OR_TRUE]]:
+// LLVM: br label %[[OR_MERGE:.+]]
+// LLVM: [[OR_FALSE]]:
+// LLVM: %[[B_VAL2:.*]] = load i8, ptr %[[B_ADDR]]
+// LLVM: %[[B_BOOL2:.*]] = trunc i8 %[[B_VAL2]] to i1
+// LLVM: br label %[[OR_MERGE]]
+// LLVM: [[OR_MERGE]]:
+// LLVM: %[[OR_PHI:.*]] = phi i1 [ %[[B_BOOL2]], %[[OR_FALSE]] ], [ true, %[[OR_TRUE]] ]
+// LLVM: %[[ZEXT_OR:.*]] = zext i1 %[[OR_PHI]] to i8
+// LLVM: store i8 %[[ZEXT_OR]], ptr %[[X]]
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z2b1bb
+// OGCG-SAME: (i1 {{.*}} %[[ARG0:.+]], i1 {{.*}} %[[ARG1:.+]])
+// OGCG: [[ENTRY:.*]]:
+// OGCG: %[[A_ADDR:.*]] = alloca i8
+// OGCG: %[[B_ADDR:.*]] = alloca i8
+// OGCG: %[[X:.*]] = alloca i8
+// OGCG: %[[ZEXT0:.*]] = zext i1 %[[ARG0]] to i8
+// OGCG: store i8 %[[ZEXT0]], ptr %[[A_ADDR]]
+// OGCG: %[[ZEXT1:.*]] = zext i1 %[[ARG1]] to i8
+// OGCG: store i8 %[[ZEXT1]], ptr %[[B_ADDR]]
+// OGCG: %[[A_VAL:.*]] = load i8, ptr %[[A_ADDR]]
+// OGCG: %[[A_BOOL:.*]] = trunc i8 %[[A_VAL]] to i1
+// OGCG: br i1 %[[A_BOOL]], label %[[AND_TRUE:.+]], label %[[AND_MERGE:.+]]
+// OGCG: [[AND_TRUE]]:
+// OGCG: %[[B_VAL:.*]] = load i8, ptr %[[B_ADDR]]
+// OGCG: %[[B_BOOL:.*]] = trunc i8 %[[B_VAL]] to i1
+// OGCG: br label %[[AND_MERGE:.+]]
+// OGCG: [[AND_MERGE]]:
+// OGCG: %[[AND_PHI:.*]] = phi i1 [ false, %[[ENTRY]] ], [ %[[B_BOOL]], %[[AND_TRUE]] ]
+// OGCG: %[[ZEXT_AND:.*]] = zext i1 %[[AND_PHI]] to i8
+// OGCG: store i8 %[[ZEXT_AND]], ptr %[[X]]
+// OGCG: %[[X_VAL:.*]] = load i8, ptr %[[X]]
+// OGCG: %[[X_BOOL:.*]] = trunc i8 %[[X_VAL]] to i1
+// OGCG: br i1 %[[X_BOOL]], label %[[OR_MERGE:.+]], label %[[OR_FALSE:.+]]
+// OGCG: [[OR_FALSE]]:
+// OGCG: %[[B_VAL2:.*]] = load i8, ptr %[[B_ADDR]]
+// OGCG: %[[B_BOOL2:.*]] = trunc i8 %[[B_VAL2]] to i1
+// OGCG: br label %[[OR_MERGE]]
+// OGCG: [[OR_MERGE]]:
+// OGCG: %[[OR_PHI:.*]] = phi i1 [ true, %[[AND_MERGE]] ], [ %[[B_BOOL2]], %[[OR_FALSE]] ]
+// OGCG: %[[ZEXT_OR:.*]] = zext i1 %[[OR_PHI]] to i8
+// OGCG: store i8 %[[ZEXT_OR]], ptr %[[X]]
+// OGCG: ret void
+
+void b3(int a, int b, int c, int d) {
+ bool x = (a == b) && (c == d);
+ x = (a == b) || (c == d);
+}
+
+// CIR-LABEL: cir.func @_Z2b3iiii(
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}, %[[ARG2:.*]]: !s32i {{.*}}, %[[ARG3:.*]]: !s32i {{.*}})
+// CIR: [[A:%[0-9]+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: [[B:%[0-9]+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: [[C:%[0-9]+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["c", init]
+// CIR: [[D:%[0-9]+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["d", init]
+// CIR: [[X:%[0-9]+]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["x", init]
+// CIR: cir.store %[[ARG0]], [[A]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store %[[ARG1]], [[B]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store %[[ARG2]], [[C]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store %[[ARG3]], [[D]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[AVAL1:%[0-9]+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[BVAL1:%[0-9]+]] = cir.load align(4) [[B]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[CMP1:%[0-9]+]] = cir.cmp(eq, [[AVAL1]], [[BVAL1]]) : !s32i, !cir.bool
+// CIR: [[AND_RESULT:%[0-9]+]] = cir.ternary([[CMP1]], true {
+// CIR: [[CVAL1:%[0-9]+]] = cir.load align(4) [[C]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[DVAL1:%[0-9]+]] = cir.load align(4) [[D]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[CMP2:%[0-9]+]] = cir.cmp(eq, [[CVAL1]], [[DVAL1]]) : !s32i, !cir.bool
+// CIR: cir.yield [[CMP2]] : !cir.bool
+// CIR: }, false {
+// CIR: [[FALSE:%[0-9]+]] = cir.const #false
+// CIR: cir.yield [[FALSE]] : !cir.bool
+// CIR: }) : (!cir.bool) -> !cir.bool
+// CIR: cir.store align(1) [[AND_RESULT]], [[X]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: [[AVAL2:%[0-9]+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[BVAL2:%[0-9]+]] = cir.load align(4) [[B]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[CMP3:%[0-9]+]] = cir.cmp(eq, [[AVAL2]], [[BVAL2]]) : !s32i, !cir.bool
+// CIR: [[OR_RESULT:%[0-9]+]] = cir.ternary([[CMP3]], true {
+// CIR: [[TRUE:%[0-9]+]] = cir.const #true
+// CIR: cir.yield [[TRUE]] : !cir.bool
+// CIR: }, false {
+// CIR: [[CVAL2:%[0-9]+]] = cir.load align(4) [[C]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[DVAL2:%[0-9]+]] = cir.load align(4) [[D]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[CMP4:%[0-9]+]] = cir.cmp(eq, [[CVAL2]], [[DVAL2]]) : !s32i, !cir.bool
+// CIR: cir.yield [[CMP4]] : !cir.bool
+// CIR: }) : (!cir.bool) -> !cir.bool
+// CIR: cir.store align(1) [[OR_RESULT]], [[X]] : !cir.bool, !cir.ptr<!cir.bool>
+// CIR: cir.return
+
+
+// LLVM-LABEL: define void @_Z2b3iiii(
+// LLVM-SAME: i32 %[[ARG0:.+]], i32 %[[ARG1:.+]], i32 %[[ARG2:.+]], i32 %[[ARG3:.+]])
+// LLVM: %[[A_ADDR:.*]] = alloca i32, i64 1
+// LLVM: %[[B_ADDR:.*]] = alloca i32, i64 1
+// LLVM: %[[C_ADDR:.*]] = alloca i32, i64 1
+// LLVM: %[[D_ADDR:.*]] = alloca i32, i64 1
+// LLVM: %[[X:.*]] = alloca i8, i64 1
+// LLVM: store i32 %[[ARG0]], ptr %[[A_ADDR]]
+// LLVM: store i32 %[[ARG1]], ptr %[[B_ADDR]]
+// LLVM: store i32 %[[ARG2]], ptr %[[C_ADDR]]
+// LLVM: store i32 %[[ARG3]], ptr %[[D_ADDR]]
+// LLVM: %[[A_VAL:.*]] = load i32, ptr %[[A_ADDR]]
+// LLVM: %[[B_VAL:.*]] = load i32, ptr %[[B_ADDR]]
+// LLVM: %[[CMP1:.*]] = icmp eq i32 %[[A_VAL]], %[[B_VAL]]
+// LLVM: br i1 %[[CMP1]], label %[[AND_TRUE:.+]], label %[[AND_FALSE:.+]]
+// LLVM: [[AND_TRUE]]:
+// LLVM: %[[C_VAL:.*]] = load i32, ptr %[[C_ADDR]]
+// LLVM: %[[D_VAL:.*]] = load i32, ptr %[[D_ADDR]]
+// LLVM: %[[CMP2:.*]] = icmp eq i32 %[[C_VAL]], %[[D_VAL]]
+// LLVM: br label %[[AND_MERGE:.+]]
+// LLVM: [[AND_FALSE]]:
+// LLVM: br label %[[AND_MERGE]]
+// LLVM: [[AND_MERGE]]:
+// LLVM: %[[AND_PHI:.*]] = phi i1 [ false, %[[AND_FALSE]] ], [ %[[CMP2]], %[[AND_TRUE]] ]
+// LLVM: %[[ZEXT_AND:.*]] = zext i1 %[[AND_PHI]] to i8
+// LLVM: store i8 %[[ZEXT_AND]], ptr %[[X]]
+// LLVM: %[[A_VAL2:.*]] = load i32, ptr %[[A_ADDR]]
+// LLVM: %[[B_VAL2:.*]] = load i32, ptr %[[B_ADDR]]
+// LLVM: %[[CMP3:.*]] = icmp eq i32 %[[A_VAL2]], %[[B_VAL2]]
+// LLVM: br i1 %[[CMP3]], label %[[OR_TRUE:.+]], label %[[OR_FALSE:.+]]
+// LLVM: [[OR_TRUE]]:
+// LLVM: br label %[[OR_MERGE:.+]]
+// LLVM: [[OR_FALSE]]:
+// LLVM: %[[C_VAL2:.*]] = load i32, ptr %[[C_ADDR]]
+// LLVM: %[[D_VAL2:.*]] = load i32, ptr %[[D_ADDR]]
+// LLVM: %[[CMP4:.*]] = icmp eq i32 %[[C_VAL2]], %[[D_VAL2]]
+// LLVM: br label %[[OR_MERGE]]
+// LLVM: [[OR_MERGE]]:
+// LLVM: %[[OR_PHI:.*]] = phi i1 [ %[[CMP4]], %[[OR_FALSE]] ], [ true, %[[OR_TRUE]] ]
+// LLVM: %[[ZEXT_OR:.*]] = zext i1 %[[OR_PHI]] to i8
+// LLVM: store i8 %[[ZEXT_OR]], ptr %[[X]]
+// LLVM: ret void
+
+// OGCG-LABEL: define dso_local void @_Z2b3iiii(
+// OGCG-SAME: i32 {{.*}} %[[ARG0:.+]], i32 {{.*}} %[[ARG1:.+]], i32 {{.*}} %[[ARG2:.+]], i32 {{.*}} %[[ARG3:.+]])
+// OGCG: [[ENTRY:.*]]:
+// OGCG: %[[A_ADDR:.*]] = alloca i32
+// OGCG: %[[B_ADDR:.*]] = alloca i32
+// OGCG: %[[C_ADDR:.*]] = alloca i32
+// OGCG: %[[D_ADDR:.*]] = alloca i32
+// OGCG: %[[X:.*]] = alloca i8
+// OGCG: store i32 %[[ARG0]], ptr %[[A_ADDR]]
+// OGCG: store i32 %[[ARG1]], ptr %[[B_ADDR]]
+// OGCG: store i32 %[[ARG2]], ptr %[[C_ADDR]]
+// OGCG: store i32 %[[ARG3]], ptr %[[D_ADDR]]
+// OGCG: %[[A_VAL:.*]] = load i32, ptr %[[A_ADDR]]
+// OGCG: %[[B_VAL:.*]] = load i32, ptr %[[B_ADDR]]
+// OGCG: %[[CMP1:.*]] = icmp eq i32 %[[A_VAL]], %[[B_VAL]]
+// OGCG: br i1 %[[CMP1]], label %[[AND_TRUE:.+]], label %[[AND_MERGE:.+]]
+// OGCG: [[AND_TRUE]]:
+// OGCG: %[[C_VAL:.*]] = load i32, ptr %[[C_ADDR]]
+// OGCG: %[[D_VAL:.*]] = load i32, ptr %[[D_ADDR]]
+// OGCG: %[[CMP2:.*]] = icmp eq i32 %[[C_VAL]], %[[D_VAL]]
+// OGCG: br label %[[AND_MERGE:.+]]
+// OGCG: [[AND_MERGE]]:
+// OGCG: %[[AND_PHI:.*]] = phi i1 [ false, %[[ENTRY]] ], [ %[[CMP2]], %[[AND_TRUE]] ]
+// OGCG: %[[ZEXT_AND:.*]] = zext i1 %[[AND_PHI]] to i8
+// OGCG: store i8 %[[ZEXT_AND]], ptr %[[X]]
+// OGCG: %[[A_VAL2:.*]] = load i32, ptr %[[A_ADDR]]
+// OGCG: %[[B_VAL2:.*]] = load i32, ptr %[[B_ADDR]]
+// OGCG: %[[CMP3:.*]] = icmp eq i32 %[[A_VAL2]], %[[B_VAL2]]
+// OGCG: br i1 %[[CMP3]], label %[[OR_MERGE:.+]], label %[[OR_FALSE:.+]]
+// OGCG: [[OR_FALSE]]:
+// OGCG: %[[C_VAL2:.*]] = load i32, ptr %[[C_ADDR]]
+// OGCG: %[[D_VAL2:.*]] = load i32, ptr %[[D_ADDR]]
+// OGCG: %[[CMP4:.*]] = icmp eq i32 %[[C_VAL2]], %[[D_VAL2]]
+// OGCG: br label %[[OR_MERGE]]
+// OGCG: [[OR_MERGE]]:
+// OGCG: %[[OR_PHI:.*]] = phi i1 [ true, %[[AND_MERGE]] ], [ %[[CMP4]], %[[OR_FALSE]] ]
+// OGCG: %[[ZEXT_OR:.*]] = zext i1 %[[OR_PHI]] to i8
+// OGCG: store i8 %[[ZEXT_OR]], ptr %[[X]]
+// OGCG: ret void
\ No newline at end of file
diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp
new file mode 100644
index 0000000000000..3b66f7ccdf54f
--- /dev/null
+++ b/clang/test/CIR/CodeGen/ternary.cpp
@@ -0,0 +1,147 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+int x(int y) {
+ return y > 0 ? 3 : 5;
+}
+
+// CIR-LABEL: cir.func @_Z1xi(
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}) -> !s32i {
+// CIR: [[Y:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init] {alignment = 4 : i64}
+// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+// CIR: cir.store %[[ARG0]], [[Y]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[YVAL:%.+]] = cir.load align(4) [[Y]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[ZERO:%.+]] = cir.const #cir.int<0> : !s32i
+// CIR: [[CMP:%.+]] = cir.cmp(gt, [[YVAL]], [[ZERO]]) : !s32i, !cir.bool
+// CIR: [[THREE:%.+]] = cir.const #cir.int<3> : !s32i
+// CIR: [[FIVE:%.+]] = cir.const #cir.int<5> : !s32i
+// CIR: [[SELECT_RES:%.+]] = cir.select if [[CMP]] then [[THREE]] else [[FIVE]] : (!cir.bool, !s32i, !s32i) -> !s32i
+// CIR: cir.store [[SELECT_RES]], [[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RETVAL_VAL:%.+]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RETVAL_VAL]] : !s32i
+
+// LLVM-LABEL: define i32 @_Z1xi(
+// LLVM-SAME: i32 %[[ARG0:.+]])
+// LLVM: %[[Y:.*]] = alloca i32
+// LLVM: %[[RETVAL:.*]] = alloca i32
+// LLVM: store i32 %[[ARG0]], ptr %[[Y]]
+// LLVM: %[[YVAL:.*]] = load i32, ptr %[[Y]]
+// LLVM: %[[CMP:.*]] = icmp sgt i32 %[[YVAL]], 0
+// LLVM: %[[SELECT:.*]] = select i1 %[[CMP]], i32 3, i32 5
+// LLVM: store i32 %[[SELECT]], ptr %[[RETVAL]]
+// LLVM: %[[RESULT:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM: ret i32 %[[RESULT]]
+
+// OGCG-LABEL: define dso_local noundef i32 @_Z1xi(
+// OGCG-SAME: i32 {{.*}} %[[ARG0:.+]])
+// OGCG: %[[Y:.*]] = alloca i32
+// OGCG: store i32 %[[ARG0]], ptr %[[Y]]
+// OGCG: %[[YVAL:.*]] = load i32, ptr %[[Y]]
+// OGCG: %[[CMP:.*]] = icmp sgt i32 %[[YVAL]], 0
+// OGCG: %[[SELECT:.*]] = select i1 %[[CMP]], i32 3, i32 5
+// OGCG: ret i32 %[[SELECT]]
+
+int foo(int a, int b) {
+ if (a < b ? 0 : a)
+ return -1;
+ return 0;
+}
+
+// CIR-LABEL: cir.func @_Z3fooii(
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}) -> !s32i {
+// CIR: [[A:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init] {alignment = 4 : i64}
+// CIR: [[B:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init] {alignment = 4 : i64}
+// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+// CIR: cir.store %[[ARG0]], [[A]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store %[[ARG1]], [[B]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.scope {
+// CIR: [[ALOAD:%.+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[BLOAD:%.+]] = cir.load align(4) [[B]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[CMP:%.+]] = cir.cmp(lt, [[ALOAD]], [[BLOAD]]) : !s32i, !cir.bool
+// CIR: [[TERNARY_RES:%.+]] = cir.ternary([[CMP]], true {
+// CIR: [[ZERO:%.+]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.yield [[ZERO]] : !s32i
+// CIR: }, false {
+// CIR: [[ALOAD2:%.+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.yield [[ALOAD2]] : !s32i
+// CIR: }) : (!cir.bool) -> !s32i
+// CIR: [[CAST:%.+]] = cir.cast(int_to_bool, [[TERNARY_RES]] : !s32i), !cir.bool
+// CIR: cir.if [[CAST]] {
+// CIR: [[ONE:%.+]] = cir.const #cir.int<1> : !s32i
+// CIR: [[MINUS_ONE:%.+]] = cir.unary(minus, [[ONE]]) nsw : !s32i, !s32i
+// CIR: cir.store [[MINUS_ONE]], [[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RETVAL_VAL:%.+]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RETVAL_VAL]] : !s32i
+// CIR: }
+// CIR: }
+// CIR: [[ZERO2:%.+]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store [[ZERO2]], [[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RETVAL_VAL2:%.+]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RETVAL_VAL2]] : !s32i
+
+// LLVM-LABEL: define i32 @_Z3fooii(
+// LLVM-SAME: i32 %[[ARG0:.*]], i32 %[[ARG1:.*]])
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM: %[[RETVAL:.*]] = alloca i32
+// LLVM: store i32 %[[ARG0]], ptr %[[A]]
+// LLVM: store i32 %[[ARG1]], ptr %[[B]]
+// LLVM: br label %[[ENTRY_BB:.*]]
+// LLVM: [[ENTRY_BB]]:
+// LLVM: %[[AVAL:.*]] = load i32, ptr %[[A]]
+// LLVM: %[[BVAL:.*]] = load i32, ptr %[[B]]
+// LLVM: %[[CMP:.*]] = icmp slt i32 %[[AVAL]], %[[BVAL]]
+// LLVM: br i1 %[[CMP]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: %[[AVAL2:.*]] = load i32, ptr %[[A]]
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PHI:.*]] = phi i32 [ %[[AVAL2]], %[[FALSE_BB]] ], [ 0, %[[TRUE_BB]] ]
+// LLVM: %[[COND:.*]] = icmp ne i32 %[[PHI]], 0
+// LLVM: br i1 %[[COND]], label %[[RETURN_MINUS_ONE:.*]], label %[[CONT_BB:.*]]
+// LLVM: [[RETURN_MINUS_ONE]]:
+// LLVM: store i32 -1, ptr %[[RETVAL]]
+// LLVM: %[[RET1:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM: ret i32 %[[RET1]]
+// LLVM: [[CONT_BB]]:
+// LLVM: br label %[[RETURN_ZERO:.*]]
+// LLVM: [[RETURN_ZERO]]:
+// LLVM: store i32 0, ptr %[[RETVAL]]
+// LLVM: %[[RET2:.*]] = load i32, ptr %[[RETVAL]]
+// LLVM: ret i32 %[[RET2]]
+
+// OGCG-LABEL: define dso_local noundef i32 @_Z3fooii(
+// OGCG-SAME: i32 {{.*}} %[[ARG0:.*]], i32 {{.*}} %[[ARG1:.*]])
+// OGCG: %[[RETVAL:.*]] = alloca i32
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG: store i32 %[[ARG0]], ptr %[[A]]
+// OGCG: store i32 %[[ARG1]], ptr %[[B]]
+// OGCG: %[[AVAL:.*]] = load i32, ptr %[[A]]
+// OGCG: %[[BVAL:.*]] = load i32, ptr %[[B]]
+// OGCG: %[[CMP:.*]] = icmp slt i32 %[[AVAL]], %[[BVAL]]
+// OGCG: br i1 %[[CMP]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: %[[AVAL2:.*]] = load i32, ptr %[[A]]
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PHI:.*]] = phi i32 [ 0, %[[TRUE_BB]] ], [ %[[AVAL2]], %[[FALSE_BB]] ]
+// OGCG: %[[COND:.*]] = icmp ne i32 %[[PHI]], 0
+// OGCG: br i1 %[[COND]], label %[[RETURN_MINUS_ONE:.*]], label %[[RETURN_ZERO:.*]]
+// OGCG: [[RETURN_MINUS_ONE]]:
+// OGCG: store i32 -1, ptr %[[RETVAL]]
+// OGCG: br label %[[RETURN:.+]]
+// OGCG: [[RETURN_ZERO]]:
+// OGCG: store i32 0, ptr %[[RETVAL]]
+// OGCG: br label %[[RETURN]]
+// OGCG: [[RETURN]]:
+// OGCG: %[[RET2:.*]] = load i32, ptr %[[RETVAL]]
+// OGCG: ret i32 %[[RET2]]