[clang] [CIR] Upstream overflow builtins (PR #166643)
Andy Kaylor via cfe-commits
cfe-commits at lists.llvm.org
Fri Nov 21 14:42:17 PST 2025
https://github.com/andykaylor updated https://github.com/llvm/llvm-project/pull/166643
>From 7abb2167def4dfceeb96c4b231a94fd8308c6e08 Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Wed, 5 Nov 2025 13:04:40 -0800
Subject: [PATCH 01/24] [CIR] Upstream overflow builtins
This implements the builtins that handle overflow.
---
.../CIR/Dialect/Builder/CIRBaseBuilder.h | 14 +
clang/include/clang/CIR/Dialect/IR/CIROps.td | 76 ++++
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 194 ++++++++++
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 112 ++++++
clang/test/CIR/CodeGen/builtins-overflow.cpp | 364 ++++++++++++++++++
5 files changed, 760 insertions(+)
create mode 100644 clang/test/CIR/CodeGen/builtins-overflow.cpp
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 3288f5b12c77e..6c1951714ba1f 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -408,6 +408,20 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
callee.getFunctionType().getReturnType(), operands);
}
+ struct BinOpOverflowResults {
+ mlir::Value result;
+ mlir::Value overflow;
+ };
+
+ BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc,
+ cir::IntType resultTy,
+ cir::BinOpOverflowKind kind,
+ mlir::Value lhs, mlir::Value rhs) {
+ auto op =
+ cir::BinOpOverflowOp::create(*this, loc, resultTy, kind, lhs, rhs);
+ return {op.getResult(), op.getOverflow()};
+ }
+
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index dc56db1bbd4ea..328880d6f3581 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1628,6 +1628,82 @@ def CIR_CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> {
let isLLVMLoweringRecursive = true;
}
+//===----------------------------------------------------------------------===//
+// BinOpOverflowOp
+//===----------------------------------------------------------------------===//
+
+def CIR_BinOpOverflowKind : CIR_I32EnumAttr<
+ "BinOpOverflowKind", "checked binary arithmetic operation kind", [
+ I32EnumAttrCase<"Add", 0, "add">,
+ I32EnumAttrCase<"Sub", 1, "sub">,
+ I32EnumAttrCase<"Mul", 2, "mul">
+]>;
+
+def CIR_BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> {
+ let summary = "Perform binary integral arithmetic with overflow checking";
+ let description = [{
+ `cir.binop.overflow` performs binary arithmetic operations with overflow
+ checking on integral operands.
+
+ The `kind` argument specifies the kind of arithmetic operation to perform.
+ It can be either `add`, `sub`, or `mul`. The `lhs` and `rhs` arguments
+ specify the input operands of the arithmetic operation. The types of `lhs`
+ and `rhs` must be the same.
+
+ `cir.binop.overflow` produces two SSA values. `result` is the result of the
+ arithmetic operation truncated to its specified type. `overflow` is a
+ boolean value indicating whether overflow happens during the operation.
+
+ The exact semantics of this operation are as follows:
+
+ - `lhs` and `rhs` are promoted to an imaginary integral type that has
+ infinite precision.
+ - The arithmetic operation is performed on the promoted operands.
+ - The infinite-precision result is truncated to the type of `result`. The
+ truncated result is assigned to `result`.
+ - If the truncated result is equal to the un-truncated result, `overflow`
+ is set to false. Otherwise, `overflow` is set to true.
+ }];
+
+ let arguments = (ins
+ CIR_BinOpOverflowKind:$kind,
+ CIR_IntType:$lhs,
+ CIR_IntType:$rhs
+ );
+
+ let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow);
+
+ let assemblyFormat = [{
+ `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,`
+ `(` type($result) `,` type($overflow) `)`
+ attr-dict
+ }];
+
+ let builders = [
+ OpBuilder<(ins "cir::IntType":$resultTy,
+ "cir::BinOpOverflowKind":$kind,
+ "mlir::Value":$lhs,
+ "mlir::Value":$rhs), [{
+ auto overflowTy = cir::BoolType::get($_builder.getContext());
+ build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs);
+ }]>
+ ];
+
+ let extraLLVMLoweringPatternDecl = [{
+ static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind,
+ bool isSigned, unsigned width);
+
+ struct EncompassedTypeInfo {
+ bool sign;
+ unsigned width;
+ };
+
+ static EncompassedTypeInfo computeEncompassedTypeWidth(cir::IntType operandTy,
+ cir::IntType resultTy);
+ }];
+}
+
+
//===----------------------------------------------------------------------===//
// BinOp
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index d9b9e3b877b50..19ce15ca5aeeb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -58,6 +58,52 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
return RValue::get(result);
}
+namespace {
+struct WidthAndSignedness {
+ unsigned Width;
+ bool Signed;
+};
+} // namespace
+
+static WidthAndSignedness
+getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
+ const clang::QualType Type) {
+ assert(Type->isIntegerType() && "Given type is not an integer.");
+ unsigned Width = Type->isBooleanType() ? 1
+ : Type->isBitIntType() ? astContext.getIntWidth(Type)
+ : astContext.getTypeInfo(Type).Width;
+ bool Signed = Type->isSignedIntegerType();
+ return {Width, Signed};
+}
+
+// Given one or more integer types, this function produces an integer type that
+// encompasses them: any value in one of the given types could be expressed in
+// the encompassing type.
+static struct WidthAndSignedness
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
+ assert(Types.size() > 0 && "Empty list of types.");
+
+ // If any of the given types is signed, we must return a signed type.
+ bool Signed = false;
+ for (const auto &Type : Types) {
+ Signed |= Type.Signed;
+ }
+
+ // The encompassing type must have a width greater than or equal to the width
+ // of the specified types. Additionally, if the encompassing type is signed,
+ // its width must be strictly greater than the width of any unsigned types
+ // given.
+ unsigned Width = 0;
+ for (const auto &Type : Types) {
+ unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
+ if (Width < MinWidth) {
+ Width = MinWidth;
+ }
+ }
+
+ return {Width, Signed};
+}
+
RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
mlir::Value input = emitScalarExpr(e->getArg(0));
mlir::Value amount = emitScalarExpr(e->getArg(1));
@@ -491,6 +537,154 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow: {
+ const clang::Expr *LeftArg = e->getArg(0);
+ const clang::Expr *RightArg = e->getArg(1);
+ const clang::Expr *ResultArg = e->getArg(2);
+
+ clang::QualType ResultQTy =
+ ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+
+ WidthAndSignedness LeftInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), LeftArg->getType());
+ WidthAndSignedness RightInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), RightArg->getType());
+ WidthAndSignedness ResultInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), ResultQTy);
+
+ // Note we compute the encompassing type with the consideration to the
+ // result type, so later in LLVM lowering we don't get redundant integral
+ // extension casts.
+ WidthAndSignedness EncompassingInfo =
+ EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+
+ auto EncompassingCIRTy = cir::IntType::get(
+ &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed);
+ auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+
+ mlir::Value Left = emitScalarExpr(LeftArg);
+ mlir::Value Right = emitScalarExpr(RightArg);
+ Address ResultPtr = emitPointerWithAlignment(ResultArg);
+
+ // Extend each operand to the encompassing type, if necessary.
+ if (Left.getType() != EncompassingCIRTy)
+ Left =
+ builder.createCast(cir::CastKind::integral, Left, EncompassingCIRTy);
+ if (Right.getType() != EncompassingCIRTy)
+ Right =
+ builder.createCast(cir::CastKind::integral, Right, EncompassingCIRTy);
+
+ // Perform the operation on the extended values.
+ cir::BinOpOverflowKind OpKind;
+ switch (builtinID) {
+ default:
+ llvm_unreachable("Unknown overflow builtin id.");
+ case Builtin::BI__builtin_add_overflow:
+ OpKind = cir::BinOpOverflowKind::Add;
+ break;
+ case Builtin::BI__builtin_sub_overflow:
+ OpKind = cir::BinOpOverflowKind::Sub;
+ break;
+ case Builtin::BI__builtin_mul_overflow:
+ OpKind = cir::BinOpOverflowKind::Mul;
+ break;
+ }
+
+ auto Loc = getLoc(e->getSourceRange());
+ auto ArithResult =
+ builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right);
+
+ // Here is a slight difference from the original clang CodeGen:
+ // - In the original clang CodeGen, the checked arithmetic result is
+ // first computed as a value of the encompassing type, and then it is
+ // truncated to the actual result type with a second overflow checking.
+ // - In CIRGen, the checked arithmetic operation directly produces the
+ // checked arithmetic result in its expected type.
+ //
+ // So we don't need a truncation and a second overflow checking here.
+
+ // Finally, store the result using the pointer.
+ bool isVolatile =
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
+ builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+ ResultPtr, isVolatile);
+
+ return RValue::get(ArithResult.overflow);
+ }
+
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow: {
+ // Scalarize our inputs.
+ mlir::Value X = emitScalarExpr(e->getArg(0));
+ mlir::Value Y = emitScalarExpr(e->getArg(1));
+
+ const clang::Expr *ResultArg = e->getArg(2);
+ Address ResultPtr = emitPointerWithAlignment(ResultArg);
+
+ // Decide which arithmetic operation we are lowering to:
+ cir::BinOpOverflowKind ArithKind;
+ switch (builtinID) {
+ default:
+ llvm_unreachable("Unknown overflow builtin id.");
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ ArithKind = cir::BinOpOverflowKind::Add;
+ break;
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ ArithKind = cir::BinOpOverflowKind::Sub;
+ break;
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ ArithKind = cir::BinOpOverflowKind::Mul;
+ break;
+ }
+
+ clang::QualType ResultQTy =
+ ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+ auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+
+ auto Loc = getLoc(e->getSourceRange());
+ auto ArithResult =
+ builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y);
+
+ bool isVolatile =
+ ResultArg->getType()->getPointeeType().isVolatileQualified();
+ builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+ ResultPtr, isVolatile);
+
+ return RValue::get(ArithResult.overflow);
+ }
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index d94108294a9a3..c81f7cc657137 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2503,6 +2503,118 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite(
return cmpOp.emitError() << "unsupported type for CmpOp: " << type;
}
+mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
+ cir::BinOpOverflowOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto loc = op.getLoc();
+ auto arithKind = op.getKind();
+ auto operandTy = op.getLhs().getType();
+ auto resultTy = op.getResult().getType();
+
+ auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy);
+ auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width);
+
+ auto lhs = adaptor.getLhs();
+ auto rhs = adaptor.getRhs();
+ if (operandTy.getWidth() < encompassedTyInfo.width) {
+ if (operandTy.isSigned()) {
+ lhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, lhs);
+ rhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, rhs);
+ } else {
+ lhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, lhs);
+ rhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, rhs);
+ }
+ }
+
+ auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign,
+ encompassedTyInfo.width);
+ auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName);
+
+ auto overflowLLVMTy = rewriter.getI1Type();
+ auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
+ rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
+
+ auto callLLVMIntrinOp = rewriter.create<mlir::LLVM::CallIntrinsicOp>(
+ loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
+ auto intrinRet = callLLVMIntrinOp.getResult(0);
+
+ auto result = rewriter
+ .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
+ ArrayRef<int64_t>{0})
+ .getResult();
+ auto overflow = rewriter
+ .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
+ ArrayRef<int64_t>{1})
+ .getResult();
+
+ if (resultTy.getWidth() < encompassedTyInfo.width) {
+ auto resultLLVMTy = getTypeConverter()->convertType(resultTy);
+ auto truncResult =
+ rewriter.create<mlir::LLVM::TruncOp>(loc, resultLLVMTy, result);
+
+ // Extend the truncated result back to the encompassing type to check for
+ // any overflows during the truncation.
+ mlir::Value truncResultExt;
+ if (resultTy.isSigned())
+ truncResultExt = rewriter.create<mlir::LLVM::SExtOp>(
+ loc, encompassedLLVMTy, truncResult);
+ else
+ truncResultExt = rewriter.create<mlir::LLVM::ZExtOp>(
+ loc, encompassedLLVMTy, truncResult);
+ auto truncOverflow = rewriter.create<mlir::LLVM::ICmpOp>(
+ loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result);
+
+ result = truncResult;
+ overflow = rewriter.create<mlir::LLVM::OrOp>(loc, overflow, truncOverflow);
+ }
+
+ auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType());
+ if (boolLLVMTy != rewriter.getI1Type())
+ overflow = rewriter.create<mlir::LLVM::ZExtOp>(loc, boolLLVMTy, overflow);
+
+ rewriter.replaceOp(op, mlir::ValueRange{result, overflow});
+
+ return mlir::success();
+}
+
+std::string CIRToLLVMBinOpOverflowOpLowering::getLLVMIntrinName(
+ cir::BinOpOverflowKind opKind, bool isSigned, unsigned width) {
+ // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}`
+
+ std::string name = "llvm.";
+
+ if (isSigned)
+ name.push_back('s');
+ else
+ name.push_back('u');
+
+ switch (opKind) {
+ case cir::BinOpOverflowKind::Add:
+ name.append("add.");
+ break;
+ case cir::BinOpOverflowKind::Sub:
+ name.append("sub.");
+ break;
+ case cir::BinOpOverflowKind::Mul:
+ name.append("mul.");
+ break;
+ }
+
+ name.append("with.overflow.i");
+ name.append(std::to_string(width));
+
+ return name;
+}
+
+CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo
+CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth(
+ cir::IntType operandTy, cir::IntType resultTy) {
+ auto sign = operandTy.getIsSigned() || resultTy.getIsSigned();
+ auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
+ resultTy.getWidth() + (sign && resultTy.isUnsigned()));
+ return {sign, width};
+}
+
mlir::LogicalResult CIRToLLVMShiftOpLowering::matchAndRewrite(
cir::ShiftOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp
new file mode 100644
index 0000000000000..8cd227d58686d
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp
@@ -0,0 +1,364 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir
+
+bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_add_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z32test_add_overflow_uint_uint_uintjjPj
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_add_overflow_int_int_int(int x, int y, int *res) {
+ return __builtin_add_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z29test_add_overflow_int_int_intiiPi
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) {
+ return __builtin_add_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
+// CIR: }
+
+bool test_sub_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_sub_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z32test_sub_overflow_uint_uint_uintjjPj
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_sub_overflow_int_int_int(int x, int y, int *res) {
+ return __builtin_sub_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z29test_sub_overflow_int_int_intiiPi
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) {
+ return __builtin_sub_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
+// CIR: }
+
+bool test_mul_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_mul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z32test_mul_overflow_uint_uint_uintjjPj
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_mul_overflow_int_int_int(int x, int y, int *res) {
+ return __builtin_mul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z29test_mul_overflow_int_int_intiiPi
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitInt(31) *res) {
+ return __builtin_mul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
+// CIR: }
+
+bool test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y, unsigned long *res) {
+ return __builtin_mul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z34test_mul_overflow_ulong_ulong_longmmPm
+// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_add_overflow_uint_int_int(unsigned x, int y, int *res) {
+ return __builtin_add_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z30test_add_overflow_uint_int_intjiPi
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[#PROM_X:]] = cir.cast integral %[[#X]] : !u32i -> !cir.int<s, 33>
+// CIR-NEXT: %[[#PROM_Y:]] = cir.cast integral %[[#Y]] : !s32i -> !cir.int<s, 33>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : <s, 33>, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_add_overflow_volatile(int x, int y, volatile int *res) {
+ return __builtin_add_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z26test_add_overflow_volatileiiPVi
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store volatile{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_uadd_overflow(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_uadd_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_uadd_overflowjjPj
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_uaddl_overflow(unsigned long x, unsigned long y, unsigned long *res) {
+ return __builtin_uaddl_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_uaddl_overflowmmPm
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) {
+ return __builtin_uaddll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_uaddll_overflowyyPy
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_usub_overflow(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_usub_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_usub_overflowjjPj
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_usubl_overflow(unsigned long x, unsigned long y, unsigned long *res) {
+ return __builtin_usubl_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_usubl_overflowmmPm
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_usubll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) {
+ return __builtin_usubll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_usubll_overflowyyPy
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_umul_overflow(unsigned x, unsigned y, unsigned *res) {
+ return __builtin_umul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_umul_overflowjjPj
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+// CIR: }
+
+bool test_umull_overflow(unsigned long x, unsigned long y, unsigned long *res) {
+ return __builtin_umull_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_umull_overflowmmPm
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_umulll_overflow(unsigned long long x, unsigned long long y, unsigned long long *res) {
+ return __builtin_umulll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_umulll_overflowyyPy
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+// CIR: }
+
+bool test_sadd_overflow(int x, int y, int *res) {
+ return __builtin_sadd_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_sadd_overflowiiPi
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_saddl_overflow(long x, long y, long *res) {
+ return __builtin_saddl_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_saddl_overflowllPl
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
+
+bool test_saddll_overflow(long long x, long long y, long long *res) {
+ return __builtin_saddll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_saddll_overflowxxPx
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
+
+bool test_ssub_overflow(int x, int y, int *res) {
+ return __builtin_ssub_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_ssub_overflowiiPi
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_ssubl_overflow(long x, long y, long *res) {
+ return __builtin_ssubl_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_ssubl_overflowllPl
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
+
+bool test_ssubll_overflow(long long x, long long y, long long *res) {
+ return __builtin_ssubll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_ssubll_overflowxxPx
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
+
+bool test_smul_overflow(int x, int y, int *res) {
+ return __builtin_smul_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z18test_smul_overflowiiPi
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+// CIR: }
+
+bool test_smull_overflow(long x, long y, long *res) {
+ return __builtin_smull_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z19test_smull_overflowllPl
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
+
+bool test_smulll_overflow(long long x, long long y, long long *res) {
+ return __builtin_smulll_overflow(x, y, res);
+}
+
+// CIR: cir.func dso_local @_Z20test_smulll_overflowxxPx
+// CIR: %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], %[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+// CIR: }
>From d03208983759d5fcffc1531b5c0c99aa57275e7f Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 15:48:57 -0600
Subject: [PATCH 02/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 19ce15ca5aeeb..c56ae5ac9028b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -93,13 +93,9 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
// of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
- unsigned Width = 0;
- for (const auto &Type : Types) {
- unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
- if (Width < MinWidth) {
- Width = MinWidth;
- }
- }
+unsigned Width = 0;
+for (const auto &Type : Types)
+ Width = std::max(Width, Type.Width + (Signed && !Type.Signed));
return {Width, Signed};
}
>From 7d73c087a161cfe1f8dd9f03ac2f8e28bce62018 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 15:49:26 -0600
Subject: [PATCH 03/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index c56ae5ac9028b..ce53b4039e1f9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -84,10 +84,7 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
assert(Types.size() > 0 && "Empty list of types.");
// If any of the given types is signed, we must return a signed type.
- bool Signed = false;
- for (const auto &Type : Types) {
- Signed |= Type.Signed;
- }
+bool Signed = llvm::any_of(Types, [](const auto &T) { return T.Signed; });
// The encompassing type must have a width greater than or equal to the width
// of the specified types. Additionally, if the encompassing type is signed,
>From 848fbe751dd73e588b95d8b3781df622f7e56c13 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 15:57:21 -0600
Subject: [PATCH 04/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ce53b4039e1f9..6caad2c15fb86 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -67,7 +67,7 @@ struct WidthAndSignedness {
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
- const clang::QualType Type) {
+ const clang::QualType type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1
: Type->isBitIntType() ? astContext.getIntWidth(Type)
>From d42d9281957c4533063bcd0eb286cf654b2fa2f0 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 15:57:36 -0600
Subject: [PATCH 05/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 6caad2c15fb86..be944d0bd9ce0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -72,7 +72,7 @@ getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
unsigned Width = Type->isBooleanType() ? 1
: Type->isBitIntType() ? astContext.getIntWidth(Type)
: astContext.getTypeInfo(Type).Width;
- bool Signed = Type->isSignedIntegerType();
+ bool signed = Type->isSignedIntegerType();
return {Width, Signed};
}
>From e2b36299c550f3b25fd84420f099b3ba436083e5 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 18:06:14 -0600
Subject: [PATCH 06/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index be944d0bd9ce0..cd0fd5177297b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -60,8 +60,8 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
namespace {
struct WidthAndSignedness {
- unsigned Width;
- bool Signed;
+ unsigned width;
+ bool signed;
};
} // namespace
>From f77a61d165ea31e989808d66d5fb1ba1ec523776 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 18:06:37 -0600
Subject: [PATCH 07/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index cd0fd5177297b..f965450fc7d89 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -69,7 +69,7 @@ static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
const clang::QualType type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
- unsigned Width = Type->isBooleanType() ? 1
+ unsigned width = type->isBooleanType() ? 1
: Type->isBitIntType() ? astContext.getIntWidth(Type)
: astContext.getTypeInfo(Type).Width;
bool signed = Type->isSignedIntegerType();
>From 781e7bb0ac38bd974d7ae9b6137d2b995e989762 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Tue, 18 Nov 2025 18:07:32 -0600
Subject: [PATCH 08/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index c81f7cc657137..f1cab4c4ebbbe 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2534,7 +2534,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
- auto callLLVMIntrinOp = rewriter.create<mlir::LLVM::CallIntrinsicOp>(
+ auto callLLVMIntrinOp = mlir::LLVM::CallIntrinsicOp::create(rewriter,
loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
auto intrinRet = callLLVMIntrinOp.getResult(0);
>From 3b01b5a20b7961fbb485f92ebaef8c39482a4cf9 Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Tue, 18 Nov 2025 16:05:17 -0800
Subject: [PATCH 09/24] [CIR] Remove createBinOpOverflowOp helper function
Remove the BinOpOverflowResults struct and createBinOpOverflowOp helper
function from CIRBaseBuilder. Instead, call cir::BinOpOverflowOp::create
directly and use getResult() and getOverflow() on the returned operation.
This simplifies the API and makes it more natural to use, as suggested
by reviewer feedback.
---
.../clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 6c1951714ba1f..3288f5b12c77e 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -408,20 +408,6 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
callee.getFunctionType().getReturnType(), operands);
}
- struct BinOpOverflowResults {
- mlir::Value result;
- mlir::Value overflow;
- };
-
- BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc,
- cir::IntType resultTy,
- cir::BinOpOverflowKind kind,
- mlir::Value lhs, mlir::Value rhs) {
- auto op =
- cir::BinOpOverflowOp::create(*this, loc, resultTy, kind, lhs, rhs);
- return {op.getResult(), op.getOverflow()};
- }
-
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
>From 05600b8d9ee33478f707de4fec8e18aff8ca6a1c Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Tue, 18 Nov 2025 16:23:58 -0800
Subject: [PATCH 10/24] [CIR] Fix style and deprecated API in overflow builtins
lowering
Replace auto with explicit types and use lowerCamelCase.
Replace deprecated rewriter.create with Op::create.
---
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 89 ++++++++++---------
1 file changed, 48 insertions(+), 41 deletions(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index f1cab4c4ebbbe..1896b89f6758f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2506,71 +2506,78 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite(
mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
cir::BinOpOverflowOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
- auto loc = op.getLoc();
- auto arithKind = op.getKind();
- auto operandTy = op.getLhs().getType();
- auto resultTy = op.getResult().getType();
+ mlir::Location loc = op.getLoc();
+ cir::BinOpOverflowKind arithKind = op.getKind();
+ cir::IntType operandTy = op.getLhs().getType();
+ cir::IntType resultTy = op.getResult().getType();
- auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy);
- auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width);
+ EncompassedTypeInfo encompassedTyInfo =
+ computeEncompassedTypeWidth(operandTy, resultTy);
+ mlir::IntegerType encompassedLLVMTy =
+ rewriter.getIntegerType(encompassedTyInfo.width);
- auto lhs = adaptor.getLhs();
- auto rhs = adaptor.getRhs();
+ mlir::Value lhs = adaptor.getLhs();
+ mlir::Value rhs = adaptor.getRhs();
if (operandTy.getWidth() < encompassedTyInfo.width) {
if (operandTy.isSigned()) {
- lhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, lhs);
- rhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, rhs);
+ lhs = mlir::LLVM::SExtOp::create(rewriter, loc, encompassedLLVMTy, lhs);
+ rhs = mlir::LLVM::SExtOp::create(rewriter, loc, encompassedLLVMTy, rhs);
} else {
- lhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, lhs);
- rhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, rhs);
+ lhs = mlir::LLVM::ZExtOp::create(rewriter, loc, encompassedLLVMTy, lhs);
+ rhs = mlir::LLVM::ZExtOp::create(rewriter, loc, encompassedLLVMTy, rhs);
}
}
- auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign,
- encompassedTyInfo.width);
- auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName);
+ std::string intrinName = getLLVMIntrinName(
+ arithKind, encompassedTyInfo.sign, encompassedTyInfo.width);
+ mlir::StringAttr intrinNameAttr =
+ mlir::StringAttr::get(op.getContext(), intrinName);
- auto overflowLLVMTy = rewriter.getI1Type();
- auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
- rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
+ mlir::IntegerType overflowLLVMTy = rewriter.getI1Type();
+ mlir::LLVM::LLVMStructType intrinRetTy =
+ mlir::LLVM::LLVMStructType::getLiteral(
+ rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
- auto callLLVMIntrinOp = mlir::LLVM::CallIntrinsicOp::create(rewriter,
- loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
- auto intrinRet = callLLVMIntrinOp.getResult(0);
+ mlir::LLVM::CallIntrinsicOp callLLVMIntrinOp =
+ mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, intrinRetTy,
+ intrinNameAttr, mlir::ValueRange{lhs, rhs});
+ mlir::Value intrinRet = callLLVMIntrinOp.getResult(0);
- auto result = rewriter
- .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
- ArrayRef<int64_t>{0})
- .getResult();
- auto overflow = rewriter
- .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
- ArrayRef<int64_t>{1})
- .getResult();
+ mlir::Value result =
+ mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
+ ArrayRef<int64_t>{0})
+ .getResult();
+ mlir::Value overflow =
+ mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
+ ArrayRef<int64_t>{1})
+ .getResult();
if (resultTy.getWidth() < encompassedTyInfo.width) {
- auto resultLLVMTy = getTypeConverter()->convertType(resultTy);
- auto truncResult =
- rewriter.create<mlir::LLVM::TruncOp>(loc, resultLLVMTy, result);
+ mlir::Type resultLLVMTy = getTypeConverter()->convertType(resultTy);
+ mlir::Value truncResult =
+ mlir::LLVM::TruncOp::create(rewriter, loc, resultLLVMTy, result);
// Extend the truncated result back to the encompassing type to check for
// any overflows during the truncation.
mlir::Value truncResultExt;
if (resultTy.isSigned())
- truncResultExt = rewriter.create<mlir::LLVM::SExtOp>(
- loc, encompassedLLVMTy, truncResult);
+ truncResultExt = mlir::LLVM::SExtOp::create(rewriter, loc,
+ encompassedLLVMTy, truncResult);
else
- truncResultExt = rewriter.create<mlir::LLVM::ZExtOp>(
- loc, encompassedLLVMTy, truncResult);
- auto truncOverflow = rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result);
+ truncResultExt = mlir::LLVM::ZExtOp::create(rewriter, loc,
+ encompassedLLVMTy, truncResult);
+ mlir::Value truncOverflow =
+ mlir::LLVM::ICmpOp::create(rewriter, loc, mlir::LLVM::ICmpPredicate::ne,
+ truncResultExt, result);
result = truncResult;
- overflow = rewriter.create<mlir::LLVM::OrOp>(loc, overflow, truncOverflow);
+ overflow = mlir::LLVM::OrOp::create(rewriter, loc, overflow, truncOverflow);
}
- auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType());
+ mlir::Type boolLLVMTy =
+ getTypeConverter()->convertType(op.getOverflow().getType());
if (boolLLVMTy != rewriter.getI1Type())
- overflow = rewriter.create<mlir::LLVM::ZExtOp>(loc, boolLLVMTy, overflow);
+ overflow = mlir::LLVM::ZExtOp::create(rewriter, loc, boolLLVMTy, overflow);
rewriter.replaceOp(op, mlir::ValueRange{result, overflow});
>From 5c00c3de0958f57ce05dd789d11ecc68fb62ed79 Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Tue, 18 Nov 2025 16:30:51 -0800
Subject: [PATCH 11/24] [CIR] Fix style and remove helper function in overflow
builtins
- Rename 'signed' field to 'isSigned' (signed is a keyword)
- Use lowerCamelCase for all variables and parameters
- Replace createBinOpOverflowOp helper with direct BinOpOverflowOp::create calls
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 44 ++++++++++++-------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index f965450fc7d89..8ac6bc2bdb80d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -61,40 +61,40 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
namespace {
struct WidthAndSignedness {
unsigned width;
- bool signed;
+ bool isSigned;
};
} // namespace
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
const clang::QualType type) {
- assert(Type->isIntegerType() && "Given type is not an integer.");
+ assert(type->isIntegerType() && "Given type is not an integer.");
unsigned width = type->isBooleanType() ? 1
- : Type->isBitIntType() ? astContext.getIntWidth(Type)
- : astContext.getTypeInfo(Type).Width;
- bool signed = Type->isSignedIntegerType();
- return {Width, Signed};
+ : type->isBitIntType() ? astContext.getIntWidth(type)
+ : astContext.getTypeInfo(type).Width;
+ bool isSigned = type->isSignedIntegerType();
+ return {width, isSigned};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
-EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
- assert(Types.size() > 0 && "Empty list of types.");
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
+ assert(types.size() > 0 && "Empty list of types.");
// If any of the given types is signed, we must return a signed type.
-bool Signed = llvm::any_of(Types, [](const auto &T) { return T.Signed; });
+ bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
// The encompassing type must have a width greater than or equal to the width
// of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
-unsigned Width = 0;
-for (const auto &Type : Types)
- Width = std::max(Width, Type.Width + (Signed && !Type.Signed));
+ unsigned width = 0;
+ for (const auto &type : types)
+ width = std::max(width, type.width + (isSigned && !type.isSigned));
- return {Width, Signed};
+ return {width, isSigned};
}
RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
@@ -554,7 +554,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
auto EncompassingCIRTy = cir::IntType::get(
- &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed);
+ &getMLIRContext(), EncompassingInfo.width, EncompassingInfo.isSigned);
auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
mlir::Value Left = emitScalarExpr(LeftArg);
@@ -586,8 +586,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
}
auto Loc = getLoc(e->getSourceRange());
- auto ArithResult =
- builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right);
+ cir::BinOpOverflowOp ArithOp =
+ cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, OpKind, Left, Right);
// Here is a slight difference from the original clang CodeGen:
// - In the original clang CodeGen, the checked arithmetic result is
@@ -601,10 +601,10 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
// Finally, store the result using the pointer.
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
- builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+ builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
ResultPtr, isVolatile);
- return RValue::get(ArithResult.overflow);
+ return RValue::get(ArithOp.getOverflow());
}
case Builtin::BI__builtin_uadd_overflow:
@@ -668,15 +668,15 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
auto Loc = getLoc(e->getSourceRange());
- auto ArithResult =
- builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y);
+ cir::BinOpOverflowOp ArithOp =
+ cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, ArithKind, X, Y);
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
- builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+ builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
ResultPtr, isVolatile);
- return RValue::get(ArithResult.overflow);
+ return RValue::get(ArithOp.getOverflow());
}
}
>From afa0bda9a75892db753c932632e6c26a72242f4a Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:16:55 -0600
Subject: [PATCH 12/24] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 8ac6bc2bdb80d..a2300dcaef36c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -586,7 +586,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
}
auto Loc = getLoc(e->getSourceRange());
- cir::BinOpOverflowOp ArithOp =
+ auto arithOp =
cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, OpKind, Left, Right);
// Here is a slight difference from the original clang CodeGen:
>From 28d8de42a1054f9b215a7e2ce1f10a75f6056cf3 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:17:32 -0600
Subject: [PATCH 13/24] Update clang/test/CIR/CodeGen/builtins-overflow.cpp
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>
---
clang/test/CIR/CodeGen/builtins-overflow.cpp | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp
index 8cd227d58686d..0014aa960b539 100644
--- a/clang/test/CIR/CodeGen/builtins-overflow.cpp
+++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp
@@ -1,5 +1,9 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck %s --check-prefix=LLVM --input-file=%t-cir.ll
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck %s --check-prefix=OGCG --input-file=%t.ll
bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
return __builtin_add_overflow(x, y, res);
>From 1767a9794da9a4f408ed0500c08712e754726f12 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:17:45 -0600
Subject: [PATCH 14/24] Update clang/include/clang/CIR/Dialect/IR/CIROps.td
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/include/clang/CIR/Dialect/IR/CIROps.td | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 328880d6f3581..acbc3a805ec70 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1674,8 +1674,8 @@ def CIR_BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> {
let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow);
let assemblyFormat = [{
- `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,`
- `(` type($result) `,` type($overflow) `)`
+ `(` $kind `,` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `->`
+ `(` qualified(type($result)) `,` qualified(type($overflow)) `)`
attr-dict
}];
>From 8e7c9a494bb2dd207a1463e335756c7196f6f7ec Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:17:59 -0600
Subject: [PATCH 15/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1896b89f6758f..728f72d27e978 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2534,7 +2534,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
mlir::StringAttr::get(op.getContext(), intrinName);
mlir::IntegerType overflowLLVMTy = rewriter.getI1Type();
- mlir::LLVM::LLVMStructType intrinRetTy =
+ auto intrinRetTy =
mlir::LLVM::LLVMStructType::getLiteral(
rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
>From 03f4133a72fd8c65a7a19c31df973fdde94839a6 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:09 -0600
Subject: [PATCH 16/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 728f72d27e978..d57b4b092d996 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2538,7 +2538,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
mlir::LLVM::LLVMStructType::getLiteral(
rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
- mlir::LLVM::CallIntrinsicOp callLLVMIntrinOp =
+ auto callLLVMIntrinOp =
mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, intrinRetTy,
intrinNameAttr, mlir::ValueRange{lhs, rhs});
mlir::Value intrinRet = callLLVMIntrinOp.getResult(0);
>From 7efb03f47358f71067623b7581a70f13440c5362 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:18 -0600
Subject: [PATCH 17/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index d57b4b092d996..fa0569588dd8c 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2554,7 +2554,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
if (resultTy.getWidth() < encompassedTyInfo.width) {
mlir::Type resultLLVMTy = getTypeConverter()->convertType(resultTy);
- mlir::Value truncResult =
+ auto mlir::Value truncResult =
mlir::LLVM::TruncOp::create(rewriter, loc, resultLLVMTy, result);
// Extend the truncated result back to the encompassing type to check for
>From 1c61733b0ffa0f453fe61874c7e3950f5ed91858 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:27 -0600
Subject: [PATCH 18/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index fa0569588dd8c..3cc90f1a51549 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2566,7 +2566,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
else
truncResultExt = mlir::LLVM::ZExtOp::create(rewriter, loc,
encompassedLLVMTy, truncResult);
- mlir::Value truncOverflow =
+ auto truncOverflow =
mlir::LLVM::ICmpOp::create(rewriter, loc, mlir::LLVM::ICmpPredicate::ne,
truncResultExt, result);
>From b18aedfce1cb00a47a1e8a273cfaeeec2db1dc01 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:35 -0600
Subject: [PATCH 19/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 3cc90f1a51549..a5b46e6d7376e 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2617,7 +2617,7 @@ CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo
CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth(
cir::IntType operandTy, cir::IntType resultTy) {
auto sign = operandTy.getIsSigned() || resultTy.getIsSigned();
- auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
+ unsigned width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
resultTy.getWidth() + (sign && resultTy.isUnsigned()));
return {sign, width};
}
>From ebcb5094c16fafd505df20a33e14ea00b1cd53f2 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:45 -0600
Subject: [PATCH 20/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index a5b46e6d7376e..0043df4c11f00 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2616,7 +2616,7 @@ std::string CIRToLLVMBinOpOverflowOpLowering::getLLVMIntrinName(
CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo
CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth(
cir::IntType operandTy, cir::IntType resultTy) {
- auto sign = operandTy.getIsSigned() || resultTy.getIsSigned();
+ bool sign = operandTy.getIsSigned() || resultTy.getIsSigned();
unsigned width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
resultTy.getWidth() + (sign && resultTy.isUnsigned()));
return {sign, width};
>From 27a3db7655ac90826842814e18faeafa009eaac2 Mon Sep 17 00:00:00 2001
From: adams381 <adams at nvidia.com>
Date: Wed, 19 Nov 2025 16:18:54 -0600
Subject: [PATCH 21/24] Update
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Co-authored-by: Henrich Lauko <henrich.lau at gmail.com>
---
clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0043df4c11f00..67725e438cb33 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2530,7 +2530,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
std::string intrinName = getLLVMIntrinName(
arithKind, encompassedTyInfo.sign, encompassedTyInfo.width);
- mlir::StringAttr intrinNameAttr =
+ auto intrinNameAttr =
mlir::StringAttr::get(op.getContext(), intrinName);
mlir::IntegerType overflowLLVMTy = rewriter.getI1Type();
>From 6fd2afa3abab1c546f97ee03380123c43fee1285 Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Fri, 21 Nov 2025 10:27:06 -0800
Subject: [PATCH 22/24] [CIR] Fix style: lowercase variables and clang-format
- Convert all variable names to lowerCamelCase per LLVM coding standards
- Fix syntax error: auto mlir::Value -> auto
- Apply clang-format for consistent 80-column line wrapping
- Keep auto usage only where type is obvious from same line
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 108 +++++++++---------
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 51 ++++-----
2 files changed, 77 insertions(+), 82 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index a2300dcaef36c..27e2ae548d1e7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -533,61 +533,61 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow: {
- const clang::Expr *LeftArg = e->getArg(0);
- const clang::Expr *RightArg = e->getArg(1);
- const clang::Expr *ResultArg = e->getArg(2);
+ const clang::Expr *leftArg = e->getArg(0);
+ const clang::Expr *rightArg = e->getArg(1);
+ const clang::Expr *resultArg = e->getArg(2);
- clang::QualType ResultQTy =
- ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+ clang::QualType resultQTy =
+ resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
- WidthAndSignedness LeftInfo =
- getIntegerWidthAndSignedness(cgm.getASTContext(), LeftArg->getType());
- WidthAndSignedness RightInfo =
- getIntegerWidthAndSignedness(cgm.getASTContext(), RightArg->getType());
- WidthAndSignedness ResultInfo =
- getIntegerWidthAndSignedness(cgm.getASTContext(), ResultQTy);
+ WidthAndSignedness leftInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
+ WidthAndSignedness rightInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
+ WidthAndSignedness resultInfo =
+ getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
// Note we compute the encompassing type with the consideration to the
// result type, so later in LLVM lowering we don't get redundant integral
// extension casts.
- WidthAndSignedness EncompassingInfo =
- EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+ WidthAndSignedness encompassingInfo =
+ EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
- auto EncompassingCIRTy = cir::IntType::get(
- &getMLIRContext(), EncompassingInfo.width, EncompassingInfo.isSigned);
- auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+ auto encompassingCIRTy = cir::IntType::get(
+ &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
+ auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
- mlir::Value Left = emitScalarExpr(LeftArg);
- mlir::Value Right = emitScalarExpr(RightArg);
- Address ResultPtr = emitPointerWithAlignment(ResultArg);
+ mlir::Value left = emitScalarExpr(leftArg);
+ mlir::Value right = emitScalarExpr(rightArg);
+ Address resultPtr = emitPointerWithAlignment(resultArg);
// Extend each operand to the encompassing type, if necessary.
- if (Left.getType() != EncompassingCIRTy)
- Left =
- builder.createCast(cir::CastKind::integral, Left, EncompassingCIRTy);
- if (Right.getType() != EncompassingCIRTy)
- Right =
- builder.createCast(cir::CastKind::integral, Right, EncompassingCIRTy);
+ if (left.getType() != encompassingCIRTy)
+ left =
+ builder.createCast(cir::CastKind::integral, left, encompassingCIRTy);
+ if (right.getType() != encompassingCIRTy)
+ right =
+ builder.createCast(cir::CastKind::integral, right, encompassingCIRTy);
// Perform the operation on the extended values.
- cir::BinOpOverflowKind OpKind;
+ cir::BinOpOverflowKind opKind;
switch (builtinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_add_overflow:
- OpKind = cir::BinOpOverflowKind::Add;
+ opKind = cir::BinOpOverflowKind::Add;
break;
case Builtin::BI__builtin_sub_overflow:
- OpKind = cir::BinOpOverflowKind::Sub;
+ opKind = cir::BinOpOverflowKind::Sub;
break;
case Builtin::BI__builtin_mul_overflow:
- OpKind = cir::BinOpOverflowKind::Mul;
+ opKind = cir::BinOpOverflowKind::Mul;
break;
}
- auto Loc = getLoc(e->getSourceRange());
- auto arithOp =
- cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, OpKind, Left, Right);
+ auto loc = getLoc(e->getSourceRange());
+ auto arithOp = cir::BinOpOverflowOp::create(builder, loc, resultCIRTy,
+ opKind, left, right);
// Here is a slight difference from the original clang CodeGen:
// - In the original clang CodeGen, the checked arithmetic result is
@@ -600,11 +600,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
// Finally, store the result using the pointer.
bool isVolatile =
- ResultArg->getType()->getPointeeType().isVolatileQualified();
- builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
- ResultPtr, isVolatile);
+ resultArg->getType()->getPointeeType().isVolatileQualified();
+ builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
+ resultPtr, isVolatile);
- return RValue::get(ArithOp.getOverflow());
+ return RValue::get(arithOp.getOverflow());
}
case Builtin::BI__builtin_uadd_overflow:
@@ -626,14 +626,14 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
// Scalarize our inputs.
- mlir::Value X = emitScalarExpr(e->getArg(0));
- mlir::Value Y = emitScalarExpr(e->getArg(1));
+ mlir::Value x = emitScalarExpr(e->getArg(0));
+ mlir::Value y = emitScalarExpr(e->getArg(1));
- const clang::Expr *ResultArg = e->getArg(2);
- Address ResultPtr = emitPointerWithAlignment(ResultArg);
+ const clang::Expr *resultArg = e->getArg(2);
+ Address resultPtr = emitPointerWithAlignment(resultArg);
// Decide which of the arithmetic operation we are lowering to:
- cir::BinOpOverflowKind ArithKind;
+ cir::BinOpOverflowKind arithKind;
switch (builtinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
@@ -643,7 +643,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
- ArithKind = cir::BinOpOverflowKind::Add;
+ arithKind = cir::BinOpOverflowKind::Add;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
@@ -651,7 +651,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
- ArithKind = cir::BinOpOverflowKind::Sub;
+ arithKind = cir::BinOpOverflowKind::Sub;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
@@ -659,24 +659,24 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
- ArithKind = cir::BinOpOverflowKind::Mul;
+ arithKind = cir::BinOpOverflowKind::Mul;
break;
}
- clang::QualType ResultQTy =
- ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
- auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+ clang::QualType resultQTy =
+ resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+ auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
- auto Loc = getLoc(e->getSourceRange());
- cir::BinOpOverflowOp ArithOp =
- cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, ArithKind, X, Y);
+ auto loc = getLoc(e->getSourceRange());
+ cir::BinOpOverflowOp arithOp = cir::BinOpOverflowOp::create(
+ builder, loc, resultCIRTy, arithKind, x, y);
bool isVolatile =
- ResultArg->getType()->getPointeeType().isVolatileQualified();
- builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
- ResultPtr, isVolatile);
+ resultArg->getType()->getPointeeType().isVolatileQualified();
+ builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
+ resultPtr, isVolatile);
- return RValue::get(ArithOp.getOverflow());
+ return RValue::get(arithOp.getOverflow());
}
}
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 67725e438cb33..b3c6c357e85c9 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2528,47 +2528,41 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
}
}
- std::string intrinName = getLLVMIntrinName(
- arithKind, encompassedTyInfo.sign, encompassedTyInfo.width);
- auto intrinNameAttr =
- mlir::StringAttr::get(op.getContext(), intrinName);
+ std::string intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign,
+ encompassedTyInfo.width);
+ auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName);
mlir::IntegerType overflowLLVMTy = rewriter.getI1Type();
- auto intrinRetTy =
- mlir::LLVM::LLVMStructType::getLiteral(
- rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
+ auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
+ rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
- auto callLLVMIntrinOp =
- mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, intrinRetTy,
- intrinNameAttr, mlir::ValueRange{lhs, rhs});
+ auto callLLVMIntrinOp = mlir::LLVM::CallIntrinsicOp::create(
+ rewriter, loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
mlir::Value intrinRet = callLLVMIntrinOp.getResult(0);
- mlir::Value result =
- mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
- ArrayRef<int64_t>{0})
- .getResult();
- mlir::Value overflow =
- mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
- ArrayRef<int64_t>{1})
- .getResult();
+ mlir::Value result = mlir::LLVM::ExtractValueOp::create(
+ rewriter, loc, intrinRet, ArrayRef<int64_t>{0})
+ .getResult();
+ mlir::Value overflow = mlir::LLVM::ExtractValueOp::create(
+ rewriter, loc, intrinRet, ArrayRef<int64_t>{1})
+ .getResult();
if (resultTy.getWidth() < encompassedTyInfo.width) {
mlir::Type resultLLVMTy = getTypeConverter()->convertType(resultTy);
- auto mlir::Value truncResult =
+ auto truncResult =
mlir::LLVM::TruncOp::create(rewriter, loc, resultLLVMTy, result);
// Extend the truncated result back to the encompassing type to check for
// any overflows during the truncation.
mlir::Value truncResultExt;
if (resultTy.isSigned())
- truncResultExt = mlir::LLVM::SExtOp::create(rewriter, loc,
- encompassedLLVMTy, truncResult);
+ truncResultExt = mlir::LLVM::SExtOp::create(
+ rewriter, loc, encompassedLLVMTy, truncResult);
else
- truncResultExt = mlir::LLVM::ZExtOp::create(rewriter, loc,
- encompassedLLVMTy, truncResult);
- auto truncOverflow =
- mlir::LLVM::ICmpOp::create(rewriter, loc, mlir::LLVM::ICmpPredicate::ne,
- truncResultExt, result);
+ truncResultExt = mlir::LLVM::ZExtOp::create(
+ rewriter, loc, encompassedLLVMTy, truncResult);
+ auto truncOverflow = mlir::LLVM::ICmpOp::create(
+ rewriter, loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result);
result = truncResult;
overflow = mlir::LLVM::OrOp::create(rewriter, loc, overflow, truncOverflow);
@@ -2617,8 +2611,9 @@ CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo
CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth(
cir::IntType operandTy, cir::IntType resultTy) {
bool sign = operandTy.getIsSigned() || resultTy.getIsSigned();
- unsigned width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
- resultTy.getWidth() + (sign && resultTy.isUnsigned()));
+ unsigned width =
+ std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
+ resultTy.getWidth() + (sign && resultTy.isUnsigned()));
return {sign, width};
}
>From 2fe28ddfb3f2cd2e57adc765d74ce49b1440ea42 Mon Sep 17 00:00:00 2001
From: Adam Smith <adams at nvidia.com>
Date: Fri, 21 Nov 2025 11:49:13 -0800
Subject: [PATCH 23/24] [CIR] Fix BinOpOverflowOp assembly format for multiple
results
The assembly format for operations with multiple results requires a comma
separator, not an arrow. Changed from:
: qualified(type($lhs)) ->
to:
: qualified(type($lhs)) ,
This produces the correct output format:
: !u32i, (!u32i, !cir.bool)
instead of the incorrect:
: !u32i -> (!u32i, !cir.bool)
Also added qualified() to properly format bit-precise integer types as
!cir.int<s, 31> instead of <s, 31>.
Added LLVM and OGCG checks to the test to verify CIR lowering produces
correct LLVM intrinsics matching the original Clang codegen.
---
clang/include/clang/CIR/Dialect/IR/CIROps.td | 2 +-
clang/test/CIR/CodeGen/builtins-overflow.cpp | 14 ++++++++++----
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index acbc3a805ec70..9ab713c32548a 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1674,7 +1674,7 @@ def CIR_BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> {
let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow);
let assemblyFormat = [{
- `(` $kind `,` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `->`
+ `(` $kind `,` $lhs `,` $rhs `)` `:` qualified(type($lhs)) `,`
`(` qualified(type($result)) `,` qualified(type($overflow)) `)`
attr-dict
}];
diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp
index 0014aa960b539..9ee3e7c015209 100644
--- a/clang/test/CIR/CodeGen/builtins-overflow.cpp
+++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp
@@ -17,6 +17,12 @@ bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
// CIR: }
+// LLVM: define{{.*}} i1 @_Z32test_add_overflow_uint_uint_uintjjPj(i32{{.*}}, i32{{.*}}, ptr{{.*}})
+// LLVM: call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+
+// OGCG: define{{.*}} i1 @_Z32test_add_overflow_uint_uint_uintjjPj(i32{{.*}}, i32{{.*}}, ptr{{.*}})
+// OGCG: call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+
bool test_add_overflow_int_int_int(int x, int y, int *res) {
return __builtin_add_overflow(x, y, res);
}
@@ -37,7 +43,7 @@ bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitIn
// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
-// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], %[[#RHS]]) : !cir.int<s, 31>, (!cir.int<s, 31>, !cir.bool)
// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
// CIR: }
@@ -73,7 +79,7 @@ bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitIn
// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
-// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], %[[#RHS]]) : !cir.int<s, 31>, (!cir.int<s, 31>, !cir.bool)
// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
// CIR: }
@@ -109,7 +115,7 @@ bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, _BitIn
// CIR: %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 31>>, !cir.int<s, 31>
// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
-// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], %[[#RHS]]) : !cir.int<s, 31>, (!cir.int<s, 31>, !cir.bool)
// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, !cir.ptr<!cir.int<s, 31>>
// CIR: }
@@ -135,7 +141,7 @@ bool test_add_overflow_uint_int_int(unsigned x, int y, int *res) {
// CIR-NEXT: %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
// CIR-NEXT: %[[#PROM_X:]] = cir.cast integral %[[#X]] : !u32i -> !cir.int<s, 33>
// CIR-NEXT: %[[#PROM_Y:]] = cir.cast integral %[[#Y]] : !s32i -> !cir.int<s, 33>
-// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : <s, 33>, (!s32i, !cir.bool)
+// CIR-NEXT: %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], %[[#PROM_Y]]) : !cir.int<s, 33>, (!s32i, !cir.bool)
// CIR-NEXT: cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
// CIR: }
>From 052ffdb95f423be84f948e868830043f41fed084 Mon Sep 17 00:00:00 2001
From: Andy Kaylor <akaylor at nvidia.com>
Date: Fri, 21 Nov 2025 14:41:55 -0800
Subject: [PATCH 24/24] Fix formatting
---
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 715367666bac9..0c1f842829686 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -1016,7 +1016,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(arithOp.getOverflow());
}
-
+
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
@@ -1088,7 +1088,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return RValue::get(arithOp.getOverflow());
}
-
+
case Builtin::BIaddressof:
case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
More information about the cfe-commits
mailing list