[clang] 2f3c937 - [CIR] Add binary operators (#132420)

via cfe-commits cfe-commits at lists.llvm.org
Tue Mar 25 14:12:31 PDT 2025


Author: Morris Hafner
Date: 2025-03-25T14:12:27-07:00
New Revision: 2f3c93743fc21686158c9ba51da8f25da9a02f9d

URL: https://github.com/llvm/llvm-project/commit/2f3c93743fc21686158c9ba51da8f25da9a02f9d
DIFF: https://github.com/llvm/llvm-project/commit/2f3c93743fc21686158c9ba51da8f25da9a02f9d.diff

LOG: [CIR] Add binary operators (#132420)

This patch upstreams support for BinOp including lvalue
assignments. Note that this does not include ternary ops,
BinOpOverflowOp, pointer arithmetic, ShiftOp, and SelectOp, which are
required for logical binary operators.

---------

Co-authored-by: Morris Hafner <mhafner at nvidia.com>
Co-authored-by: Andy Kaylor <akaylor at nvidia.com>

Added: 
    clang/test/CIR/CodeGen/binop.cpp
    clang/test/CIR/Lowering/binop-bool.cir
    clang/test/CIR/Lowering/binop-fp.cir
    clang/test/CIR/Lowering/binop-overflow.cir
    clang/test/CIR/Lowering/binop-signed-int.cir
    clang/test/CIR/Lowering/binop-unsigned-int.cir

Modified: 
    clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
    clang/include/clang/CIR/Dialect/IR/CIROps.td
    clang/include/clang/CIR/Dialect/IR/CIRTypes.h
    clang/include/clang/CIR/MissingFeatures.h
    clang/lib/CIR/CodeGen/CIRGenBuilder.h
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/Dialect/IR/CIRDialect.cpp
    clang/lib/CIR/Dialect/IR/CIRTypes.cpp
    clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
    clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index c6aea10d46b63..ac7658276ec37 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -10,24 +10,59 @@
 #define LLVM_CLANG_CIR_DIALECT_BUILDER_CIRBASEBUILDER_H
 
 #include "clang/AST/CharUnits.h"
-#include "clang/AST/Type.h"
 #include "clang/CIR/Dialect/IR/CIRAttrs.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "llvm/ADT/STLForwardCompat.h"
 #include "llvm/Support/ErrorHandling.h"
 
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Location.h"
 #include "mlir/IR/Types.h"
 
 namespace cir {
 
+enum class OverflowBehavior {
+  None = 0,
+  NoSignedWrap = 1 << 0,
+  NoUnsignedWrap = 1 << 1,
+  Saturated = 1 << 2,
+};
+
+constexpr OverflowBehavior operator|(OverflowBehavior a, OverflowBehavior b) {
+  return static_cast<OverflowBehavior>(llvm::to_underlying(a) |
+                                       llvm::to_underlying(b));
+}
+
+constexpr OverflowBehavior operator&(OverflowBehavior a, OverflowBehavior b) {
+  return static_cast<OverflowBehavior>(llvm::to_underlying(a) &
+                                       llvm::to_underlying(b));
+}
+
+constexpr OverflowBehavior &operator|=(OverflowBehavior &a,
+                                       OverflowBehavior b) {
+  a = a | b;
+  return a;
+}
+
+constexpr OverflowBehavior &operator&=(OverflowBehavior &a,
+                                       OverflowBehavior b) {
+  a = a & b;
+  return a;
+}
+
 class CIRBaseBuilderTy : public mlir::OpBuilder {
 
 public:
   CIRBaseBuilderTy(mlir::MLIRContext &mlirContext)
       : mlir::OpBuilder(&mlirContext) {}
 
+  mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ,
+                            const llvm::APInt &val) {
+    return create<cir::ConstantOp>(loc, typ, getAttr<cir::IntAttr>(typ, val));
+  }
+
   cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr) {
     return create<cir::ConstantOp>(loc, attr.getType(), attr);
   }
@@ -143,6 +178,93 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     return createCast(loc, cir::CastKind::bitcast, src, newTy);
   }
 
+  //===--------------------------------------------------------------------===//
+  // Binary Operators
+  //===--------------------------------------------------------------------===//
+
+  mlir::Value createBinop(mlir::Location loc, mlir::Value lhs,
+                          cir::BinOpKind kind, mlir::Value rhs) {
+    return create<cir::BinOp>(loc, lhs.getType(), kind, lhs, rhs);
+  }
+
+  mlir::Value createLowBitsSet(mlir::Location loc, unsigned size,
+                               unsigned bits) {
+    llvm::APInt val = llvm::APInt::getLowBitsSet(size, bits);
+    auto type = cir::IntType::get(getContext(), size, /*isSigned=*/false);
+    return getConstAPInt(loc, type, val);
+  }
+
+  mlir::Value createAnd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    return createBinop(loc, lhs, cir::BinOpKind::And, rhs);
+  }
+
+  mlir::Value createOr(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    return createBinop(loc, lhs, cir::BinOpKind::Or, rhs);
+  }
+
+  mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
+                        OverflowBehavior ob = OverflowBehavior::None) {
+    auto op =
+        create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Mul, lhs, rhs);
+    op.setNoUnsignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
+    op.setNoSignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoSignedWrap));
+    return op;
+  }
+  mlir::Value createNSWMul(mlir::Location loc, mlir::Value lhs,
+                           mlir::Value rhs) {
+    return createMul(loc, lhs, rhs, OverflowBehavior::NoSignedWrap);
+  }
+  mlir::Value createNUWAMul(mlir::Location loc, mlir::Value lhs,
+                            mlir::Value rhs) {
+    return createMul(loc, lhs, rhs, OverflowBehavior::NoUnsignedWrap);
+  }
+
+  mlir::Value createSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
+                        OverflowBehavior ob = OverflowBehavior::Saturated) {
+    auto op =
+        create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Sub, lhs, rhs);
+    op.setNoUnsignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
+    op.setNoSignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoSignedWrap));
+    op.setSaturated(llvm::to_underlying(ob & OverflowBehavior::Saturated));
+    return op;
+  }
+
+  mlir::Value createNSWSub(mlir::Location loc, mlir::Value lhs,
+                           mlir::Value rhs) {
+    return createSub(loc, lhs, rhs, OverflowBehavior::NoSignedWrap);
+  }
+
+  mlir::Value createNUWSub(mlir::Location loc, mlir::Value lhs,
+                           mlir::Value rhs) {
+    return createSub(loc, lhs, rhs, OverflowBehavior::NoUnsignedWrap);
+  }
+
+  mlir::Value createAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
+                        OverflowBehavior ob = OverflowBehavior::None) {
+    auto op =
+        create<cir::BinOp>(loc, lhs.getType(), cir::BinOpKind::Add, lhs, rhs);
+    op.setNoUnsignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoUnsignedWrap));
+    op.setNoSignedWrap(
+        llvm::to_underlying(ob & OverflowBehavior::NoSignedWrap));
+    op.setSaturated(llvm::to_underlying(ob & OverflowBehavior::Saturated));
+    return op;
+  }
+
+  mlir::Value createNSWAdd(mlir::Location loc, mlir::Value lhs,
+                           mlir::Value rhs) {
+    return createAdd(loc, lhs, rhs, OverflowBehavior::NoSignedWrap);
+  }
+
+  mlir::Value createNUWAdd(mlir::Location loc, mlir::Value lhs,
+                           mlir::Value rhs) {
+    return createAdd(loc, lhs, rhs, OverflowBehavior::NoUnsignedWrap);
+  }
+
   //
   // Block handling helpers
   // ----------------------

diff  --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index d7d63e040a2ba..455cc2b8b0277 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -826,6 +826,69 @@ def ForOp : CIR_Op<"for", [LoopOpInterface, NoRegionArguments]> {
   }];
 }
 
+//===----------------------------------------------------------------------===//
+// BinOp
+//===----------------------------------------------------------------------===//
+
+// FIXME: represent Commutative, Idempotent traits for appropriate binops
+def BinOpKind_Mul : I32EnumAttrCase<"Mul", 1, "mul">;
+def BinOpKind_Div : I32EnumAttrCase<"Div", 2, "div">;
+def BinOpKind_Rem : I32EnumAttrCase<"Rem", 3, "rem">;
+def BinOpKind_Add : I32EnumAttrCase<"Add", 4, "add">;
+def BinOpKind_Sub : I32EnumAttrCase<"Sub", 5, "sub">;
+def BinOpKind_And : I32EnumAttrCase<"And", 8, "and">;
+def BinOpKind_Xor : I32EnumAttrCase<"Xor", 9, "xor">;
+def BinOpKind_Or  : I32EnumAttrCase<"Or", 10, "or">;
+// TODO(cir): Do we need a min binop?
+def BinOpKind_Max : I32EnumAttrCase<"Max", 11, "max">;
+
+def BinOpKind : I32EnumAttr<
+    "BinOpKind",
+    "binary operation (arith and logic) kind",
+    [BinOpKind_Mul, BinOpKind_Div, BinOpKind_Rem,
+     BinOpKind_Add, BinOpKind_Sub,
+     BinOpKind_And, BinOpKind_Xor,
+     BinOpKind_Or, BinOpKind_Max]> {
+  let cppNamespace = "::cir";
+}
+
+def BinOp : CIR_Op<"binop", [Pure,
+  SameTypeOperands, SameOperandsAndResultType]> {
+
+  let summary = "Binary operations (arith and logic)";
+  let description = [{
+    cir.binop performs the binary operation according to
+    the specified opcode kind: [mul, div, rem, add, sub,
+    and, xor, or, max].
+
+    It requires two input operands and has one result, all types
+    should be the same.
+
+    ```mlir
+    %7 = cir.binop(add, %1, %2) : !s32i
+    %7 = cir.binop(mul, %1, %2) : !u8i
+    ```
+  }];
+
+  // TODO: get more accurate than CIR_AnyType
+  let results = (outs CIR_AnyType:$result);
+  let arguments = (ins Arg<BinOpKind, "binop kind">:$kind,
+                       CIR_AnyType:$lhs, CIR_AnyType:$rhs,
+                       UnitAttr:$no_unsigned_wrap,
+                       UnitAttr:$no_signed_wrap,
+                       UnitAttr:$saturated);
+
+  let assemblyFormat = [{
+    `(` $kind `,` $lhs `,` $rhs  `)`
+    (`nsw` $no_signed_wrap^)?
+    (`nuw` $no_unsigned_wrap^)?
+    (`sat` $saturated^)?
+    `:` type($lhs) attr-dict
+  }];
+
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // GlobalOp
 //===----------------------------------------------------------------------===//

diff  --git a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h
index 5d1eb17e146d0..7b0fcbc7cc98f 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRTypes.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRTypes.h
@@ -21,6 +21,7 @@
 namespace cir {
 
 bool isAnyFloatingPointType(mlir::Type t);
+bool isFPOrFPVectorTy(mlir::Type);
 
 } // namespace cir
 

diff  --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 3e33e5dc60194..795f5e707fbb5 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -79,6 +79,9 @@ struct MissingFeatures {
   static bool opUnarySignedOverflow() { return false; }
   static bool opUnaryPromotionType() { return false; }
 
+  // Clang early optimizations or things deferred to LLVM lowering.
+  static bool mayHaveIntegerOverflow() { return false; }
+
   // Misc
   static bool cxxABI() { return false; }
   static bool tryEmitAsConstant() { return false; }
@@ -93,16 +96,19 @@ struct MissingFeatures {
   static bool stackSaveOp() { return false; }
   static bool aggValueSlot() { return false; }
   static bool generateDebugInfo() { return false; }
+  static bool pointerOverflowSanitizer() { return false; }
   static bool fpConstraints() { return false; }
   static bool sanitizers() { return false; }
   static bool addHeapAllocSiteMetadata() { return false; }
   static bool targetCodeGenInfoGetNullPointer() { return false; }
-  static bool CGFPOptionsRAII() { return false; }
   static bool loopInfoStack() { return false; }
   static bool requiresCleanups() { return false; }
   static bool createProfileWeightsForLoop() { return false; }
   static bool emitCondLikelihoodViaExpectIntrinsic() { return false; }
   static bool pgoUse() { return false; }
+  static bool cgFPOptionsRAII() { return false; }
+  static bool metaDataNode() { return false; }
+  static bool fastMathFlags() { return false; }
 
   // Missing types
   static bool dataMemberType() { return false; }
@@ -111,6 +117,8 @@ struct MissingFeatures {
   static bool scalableVectors() { return false; }
   static bool unsizedTypes() { return false; }
   static bool vectorType() { return false; }
+  static bool complexType() { return false; }
+  static bool fixedPointType() { return false; }
 
   // Future CIR operations
   static bool awaitOp() { return false; }
@@ -127,6 +135,8 @@ struct MissingFeatures {
   static bool ternaryOp() { return false; }
   static bool tryOp() { return false; }
   static bool zextOp() { return false; }
+  static bool ptrStrideOp() { return false; }
+  static bool ptrDiffOp() { return false; }
 };
 
 } // namespace cir

diff  --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index fef290612149a..03fb227a464a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -14,6 +14,7 @@
 
 #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
 #include "clang/CIR/MissingFeatures.h"
+#include "llvm/ADT/STLExtras.h"
 
 namespace clang::CIRGen {
 
@@ -72,15 +73,72 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     if (const auto arrayVal = mlir::dyn_cast<cir::ConstArrayAttr>(attr)) {
       if (mlir::isa<mlir::StringAttr>(arrayVal.getElts()))
         return false;
-      for (const auto elt : mlir::cast<mlir::ArrayAttr>(arrayVal.getElts())) {
-        if (!isNullValue(elt))
-          return false;
-      }
-      return true;
+
+      return llvm::all_of(
+          mlir::cast<mlir::ArrayAttr>(arrayVal.getElts()),
+          [&](const mlir::Attribute &elt) { return isNullValue(elt); });
     }
     return false;
   }
 
+  //
+  // Type helpers
+  // ------------
+  //
+  cir::IntType getUIntNTy(int n) {
+    switch (n) {
+    case 8:
+      return getUInt8Ty();
+    case 16:
+      return getUInt16Ty();
+    case 32:
+      return getUInt32Ty();
+    case 64:
+      return getUInt64Ty();
+    default:
+      return cir::IntType::get(getContext(), n, false);
+    }
+  }
+
+  cir::IntType getSIntNTy(int n) {
+    switch (n) {
+    case 8:
+      return getSInt8Ty();
+    case 16:
+      return getSInt16Ty();
+    case 32:
+      return getSInt32Ty();
+    case 64:
+      return getSInt64Ty();
+    default:
+      return cir::IntType::get(getContext(), n, true);
+    }
+  }
+
+  cir::VoidType getVoidTy() { return typeCache.VoidTy; }
+
+  cir::IntType getSInt8Ty() { return typeCache.SInt8Ty; }
+  cir::IntType getSInt16Ty() { return typeCache.SInt16Ty; }
+  cir::IntType getSInt32Ty() { return typeCache.SInt32Ty; }
+  cir::IntType getSInt64Ty() { return typeCache.SInt64Ty; }
+
+  cir::IntType getUInt8Ty() { return typeCache.UInt8Ty; }
+  cir::IntType getUInt16Ty() { return typeCache.UInt16Ty; }
+  cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; }
+  cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; }
+
+  bool isInt8Ty(mlir::Type i) {
+    return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty;
+  }
+  bool isInt16Ty(mlir::Type i) {
+    return i == typeCache.UInt16Ty || i == typeCache.SInt16Ty;
+  }
+  bool isInt32Ty(mlir::Type i) {
+    return i == typeCache.UInt32Ty || i == typeCache.SInt32Ty;
+  }
+  bool isInt64Ty(mlir::Type i) {
+    return i == typeCache.UInt64Ty || i == typeCache.SInt64Ty;
+  }
   bool isInt(mlir::Type i) { return mlir::isa<cir::IntType>(i); }
 
   // Creates constant nullptr for pointer type ty.
@@ -88,6 +146,49 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     assert(!cir::MissingFeatures::targetCodeGenInfoGetNullPointer());
     return create<cir::ConstantOp>(loc, ty, getConstPtrAttr(ty, 0));
   }
+
+  mlir::Value createNeg(mlir::Value value) {
+
+    if (auto intTy = mlir::dyn_cast<cir::IntType>(value.getType())) {
+      // Source is a unsigned integer: first cast it to signed.
+      if (intTy.isUnsigned())
+        value = createIntCast(value, getSIntNTy(intTy.getWidth()));
+      return create<cir::UnaryOp>(value.getLoc(), value.getType(),
+                                  cir::UnaryOpKind::Minus, value);
+    }
+
+    llvm_unreachable("negation for the given type is NYI");
+  }
+
+  mlir::Value createFSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    assert(!cir::MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::fpConstraints());
+    assert(!cir::MissingFeatures::fastMathFlags());
+
+    return create<cir::BinOp>(loc, cir::BinOpKind::Sub, lhs, rhs);
+  }
+
+  mlir::Value createFAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    assert(!cir::MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::fpConstraints());
+    assert(!cir::MissingFeatures::fastMathFlags());
+
+    return create<cir::BinOp>(loc, cir::BinOpKind::Add, lhs, rhs);
+  }
+  mlir::Value createFMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    assert(!cir::MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::fpConstraints());
+    assert(!cir::MissingFeatures::fastMathFlags());
+
+    return create<cir::BinOp>(loc, cir::BinOpKind::Mul, lhs, rhs);
+  }
+  mlir::Value createFDiv(mlir::Location loc, mlir::Value lhs, mlir::Value rhs) {
+    assert(!cir::MissingFeatures::metaDataNode());
+    assert(!cir::MissingFeatures::fpConstraints());
+    assert(!cir::MissingFeatures::fastMathFlags());
+
+    return create<cir::BinOp>(loc, cir::BinOpKind::Div, lhs, rhs);
+  }
 };
 
 } // namespace clang::CIRGen

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 306130b80d457..db062f95f122e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -149,8 +149,8 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
     Address addr = Address::invalid();
 
     // The variable should generally be present in the local decl map.
-    auto iter = LocalDeclMap.find(vd);
-    if (iter != LocalDeclMap.end()) {
+    auto iter = localDeclMap.find(vd);
+    if (iter != localDeclMap.end()) {
       addr = iter->second;
     } else {
       // Otherwise, it might be static local we haven't emitted yet for some
@@ -176,7 +176,7 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *e) {
     return createDummyValue(getLoc(loc), boolTy);
   }
 
-  assert(!cir::MissingFeatures::CGFPOptionsRAII());
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
   if (!e->getType()->isAnyComplexType())
     return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
 
@@ -210,7 +210,7 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
 
     if (e->getType()->isAnyComplexType()) {
       cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec");
-      return LValue();
+      lv = LValue();
     } else {
       emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true);
     }
@@ -232,6 +232,62 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
   llvm_unreachable("Unknown unary operator kind!");
 }
 
+LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
+  // Comma expressions just emit their LHS then their RHS as an l-value.
+  if (e->getOpcode() == BO_Comma) {
+    emitIgnoredExpr(e->getLHS());
+    return emitLValue(e->getRHS());
+  }
+
+  if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) {
+    cgm.errorNYI(e->getSourceRange(), "member pointers");
+    return {};
+  }
+
+  assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+  // Note that in all of these cases, __block variables need the RHS
+  // evaluated first just in case the variable gets moved by the RHS.
+
+  switch (CIRGenFunction::getEvaluationKind(e->getType())) {
+  case cir::TEK_Scalar: {
+    assert(!cir::MissingFeatures::objCLifetime());
+    if (e->getLHS()->getType().getObjCLifetime() !=
+        clang::Qualifiers::ObjCLifetime::OCL_None) {
+      cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
+      return {};
+    }
+
+    RValue rv = emitAnyExpr(e->getRHS());
+    LValue lv = emitLValue(e->getLHS());
+
+    SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
+    if (lv.isBitField()) {
+      cgm.errorNYI(e->getSourceRange(), "bitfields");
+      return {};
+    }
+    emitStoreThroughLValue(rv, lv);
+
+    if (getLangOpts().OpenMP) {
+      cgm.errorNYI(e->getSourceRange(), "openmp");
+      return {};
+    }
+
+    return lv;
+  }
+
+  case cir::TEK_Complex: {
+    assert(!cir::MissingFeatures::complexType());
+    cgm.errorNYI(e->getSourceRange(), "complex l-values");
+    return {};
+  }
+  case cir::TEK_Aggregate:
+    cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
+    return {};
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
 /// Emit code to compute the specified expression which
 /// can have any type.  The result is returned as an RValue struct.
 RValue CIRGenFunction::emitAnyExpr(const Expr *e) {

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index ca0090f8d35b3..52bd3b2933744 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -17,6 +17,7 @@
 #include "clang/AST/StmtVisitor.h"
 #include "clang/CIR/MissingFeatures.h"
 
+#include "mlir/IR/Location.h"
 #include "mlir/IR/Value.h"
 
 #include <cassert>
@@ -26,6 +27,53 @@ using namespace clang::CIRGen;
 
 namespace {
 
+struct BinOpInfo {
+  mlir::Value lhs;
+  mlir::Value rhs;
+  SourceRange loc;
+  QualType fullType;             // Type of operands and result
+  QualType compType;             // Type used for computations. Element type
+                                 // for vectors, otherwise same as FullType.
+  BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
+  FPOptions fpfeatures;
+  const Expr *e; // Entire expr, for error unsupported.  May not be binop.
+
+  /// Check if the binop computes a division or a remainder.
+  bool isDivRemOp() const {
+    return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
+           opcode == BO_RemAssign;
+  }
+
+  /// Check if the binop can result in integer overflow.
+  bool mayHaveIntegerOverflow() const {
+    // Without constant input, we can't rule out overflow.
+    auto lhsci = dyn_cast<cir::ConstantOp>(lhs.getDefiningOp());
+    auto rhsci = dyn_cast<cir::ConstantOp>(rhs.getDefiningOp());
+    if (!lhsci || !rhsci)
+      return true;
+
+    assert(!cir::MissingFeatures::mayHaveIntegerOverflow());
+    // TODO(cir): For now we just assume that we might overflow
+    return true;
+  }
+
+  /// Check if at least one operand is a fixed point type. In such cases,
+  /// this operation did not follow usual arithmetic conversion and both
+  /// operands might not be of the same type.
+  bool isFixedPointOp() const {
+    // We cannot simply check the result type since comparison operations
+    // return an int.
+    if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
+      QualType lhstype = binOp->getLHS()->getType();
+      QualType rhstype = binOp->getRHS()->getType();
+      return lhstype->isFixedPointType() || rhstype->isFixedPointType();
+    }
+    if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
+      return unop->getSubExpr()->getType()->isFixedPointType();
+    return false;
+  }
+};
+
 class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   CIRGenFunction &cgf;
   CIRGenBuilderTy &builder;
@@ -35,6 +83,22 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
       : cgf(cgf), builder(builder) {}
 
+  //===--------------------------------------------------------------------===//
+  //                               Utilities
+  //===--------------------------------------------------------------------===//
+
+  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
+    cgf.cgm.errorNYI(result.getLoc(), "floating cast for promoted value");
+    return {};
+  }
+
+  mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
+    cgf.cgm.errorNYI(result.getLoc(), "floating cast for unpromoted value");
+    return {};
+  }
+
+  mlir::Value emitPromoted(const Expr *e, QualType promotionType);
+
   //===--------------------------------------------------------------------===//
   //                            Visitor Methods
   //===--------------------------------------------------------------------===//
@@ -60,6 +124,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
   }
 
+  mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
+    return cgf.emitLoadOfLValue(lv, loc).getScalarVal();
+  }
+
   // l-values
   mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
     assert(!cir::MissingFeatures::tryEmitAsConstant());
@@ -308,14 +376,14 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
         value = emitUnaryOp(e, kind, input);
       }
-    } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+    } else if (isa<PointerType>(type)) {
       cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec pointer");
       return {};
     } else if (type->isVectorType()) {
       cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
       return {};
     } else if (type->isRealFloatingType()) {
-      assert(!cir::MissingFeatures::CGFPOptionsRAII());
+      assert(!cir::MissingFeatures::cgFPOptionsRAII());
 
       if (type->isHalfType() &&
           !cgf.getContext().getLangOpts().NativeHalfType) {
@@ -558,8 +626,223 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
     return res;
   }
+
+  BinOpInfo emitBinOps(const BinaryOperator *e,
+                       QualType promotionType = QualType()) {
+    BinOpInfo result;
+    result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
+    result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
+    if (!promotionType.isNull())
+      result.fullType = promotionType;
+    else
+      result.fullType = e->getType();
+    result.compType = result.fullType;
+    if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
+      result.compType = vecType->getElementType();
+    }
+    result.opcode = e->getOpcode();
+    result.loc = e->getSourceRange();
+    // TODO(cir): Result.FPFeatures
+    assert(!cir::MissingFeatures::cgFPOptionsRAII());
+    result.e = e;
+    return result;
+  }
+
+  mlir::Value emitMul(const BinOpInfo &ops);
+  mlir::Value emitDiv(const BinOpInfo &ops);
+  mlir::Value emitRem(const BinOpInfo &ops);
+  mlir::Value emitAdd(const BinOpInfo &ops);
+  mlir::Value emitSub(const BinOpInfo &ops);
+  mlir::Value emitShl(const BinOpInfo &ops);
+  mlir::Value emitShr(const BinOpInfo &ops);
+  mlir::Value emitAnd(const BinOpInfo &ops);
+  mlir::Value emitXor(const BinOpInfo &ops);
+  mlir::Value emitOr(const BinOpInfo &ops);
+
+  LValue emitCompoundAssignLValue(
+      const CompoundAssignOperator *e,
+      mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
+      mlir::Value &result);
+  mlir::Value
+  emitCompoundAssign(const CompoundAssignOperator *e,
+                     mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
+
+  // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
+  // codegen.
+  QualType getPromotionType(QualType ty) {
+    if (ty->getAs<ComplexType>()) {
+      assert(!cir::MissingFeatures::complexType());
+      cgf.cgm.errorNYI("promotion to complex type");
+      return QualType();
+    }
+    if (ty.UseExcessPrecision(cgf.getContext())) {
+      if (ty->getAs<VectorType>()) {
+        assert(!cir::MissingFeatures::vectorType());
+        cgf.cgm.errorNYI("promotion to vector type");
+        return QualType();
+      }
+      return cgf.getContext().FloatTy;
+    }
+    return QualType();
+  }
+
+// Binary operators and binary compound assignment operators.
+#define HANDLEBINOP(OP)                                                        \
+  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
+    QualType promotionTy = getPromotionType(e->getType());                     \
+    auto result = emit##OP(emitBinOps(e, promotionTy));                        \
+    if (result && !promotionTy.isNull())                                       \
+      result = emitUnPromotedValue(result, e->getType());                      \
+    return result;                                                             \
+  }                                                                            \
+  mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) {          \
+    return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP);                \
+  }
+
+  HANDLEBINOP(Mul)
+  HANDLEBINOP(Div)
+  HANDLEBINOP(Rem)
+  HANDLEBINOP(Add)
+  HANDLEBINOP(Sub)
+  HANDLEBINOP(Shl)
+  HANDLEBINOP(Shr)
+  HANDLEBINOP(And)
+  HANDLEBINOP(Xor)
+  HANDLEBINOP(Or)
+#undef HANDLEBINOP
 };
 
+LValue ScalarExprEmitter::emitCompoundAssignLValue(
+    const CompoundAssignOperator *e,
+    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
+    mlir::Value &result) {
+  QualType lhsTy = e->getLHS()->getType();
+  BinOpInfo opInfo;
+
+  if (e->getComputationResultType()->isAnyComplexType()) {
+    cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
+    return LValue();
+  }
+
+  // Emit the RHS first.  __block variables need to have the rhs evaluated
+  // first, plus this should improve codegen a little.
+
+  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
+  if (promotionTypeCR.isNull())
+    promotionTypeCR = e->getComputationResultType();
+
+  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
+  QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
+
+  if (!promotionTypeRHS.isNull())
+    opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
+  else
+    opInfo.rhs = Visit(e->getRHS());
+
+  opInfo.fullType = promotionTypeCR;
+  opInfo.compType = opInfo.fullType;
+  if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
+    opInfo.compType = vecType->getElementType();
+  opInfo.opcode = e->getOpcode();
+  opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+  opInfo.e = e;
+  opInfo.loc = e->getSourceRange();
+
+  // Load/convert the LHS
+  LValue lhsLV = cgf.emitLValue(e->getLHS());
+
+  if (lhsTy->getAs<AtomicType>()) {
+    cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
+    return LValue();
+  }
+
+  opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
+
+  CIRGenFunction::SourceLocRAIIObject sourceloc{
+      cgf, cgf.getLoc(e->getSourceRange())};
+  SourceLocation loc = e->getExprLoc();
+  if (!promotionTypeLHS.isNull())
+    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
+  else
+    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
+                                      e->getComputationLHSType(), loc);
+
+  // Expand the binary operator.
+  result = (this->*func)(opInfo);
+
+  // Convert the result back to the LHS type,
+  // potentially with Implicit Conversion sanitizer check.
+  result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
+                                ScalarConversionOpts(cgf.sanOpts));
+
+  // Store the result value into the LHS lvalue. Bit-fields are handled
+  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+  // 'An assignment expression has the value of the left operand after the
+  // assignment...'.
+  if (lhsLV.isBitField())
+    cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
+  else
+    cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
+
+  if (cgf.getLangOpts().OpenMP)
+    cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
+
+  return lhsLV;
+}
+
+mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
+                                            QualType promotionType) {
+  e = e->IgnoreParens();
+  if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
+    switch (bo->getOpcode()) {
+#define HANDLE_BINOP(OP)                                                       \
+  case BO_##OP:                                                                \
+    return emit##OP(emitBinOps(bo, promotionType));
+      HANDLE_BINOP(Add)
+      HANDLE_BINOP(Sub)
+      HANDLE_BINOP(Mul)
+      HANDLE_BINOP(Div)
+#undef HANDLE_BINOP
+    default:
+      break;
+    }
+  } else if (isa<UnaryOperator>(e)) {
+    cgf.cgm.errorNYI(e->getSourceRange(), "unary operators");
+    return {};
+  }
+  mlir::Value result = Visit(const_cast<Expr *>(e));
+  if (result) {
+    if (!promotionType.isNull())
+      return emitPromotedValue(result, promotionType);
+    return emitUnPromotedValue(result, e->getType());
+  }
+  return result;
+}
+
+mlir::Value ScalarExprEmitter::emitCompoundAssign(
+    const CompoundAssignOperator *e,
+    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
+
+  bool ignore = std::exchange(ignoreResultAssign, false);
+  mlir::Value rhs;
+  LValue lhs = emitCompoundAssignLValue(e, func, rhs);
+
+  // If the result is clearly ignored, return now.
+  if (ignore)
+    return {};
+
+  // The result of an assignment in C is the assigned r-value.
+  if (!cgf.getLangOpts().CPlusPlus)
+    return rhs;
+
+  // If the lvalue is non-volatile, return the computed value of the assignment.
+  if (!lhs.isVolatile())
+    return rhs;
+
+  // Otherwise, reload the value.
+  return emitLoadOfLValue(lhs, e->getExprLoc());
+}
+
 } // namespace
 
 /// Emit the computation of the specified expression of scalar type.
@@ -570,13 +853,336 @@ mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
   return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
 }
 
-[[maybe_unused]] static bool MustVisitNullValue(const Expr *e) {
+mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
+                                                   QualType promotionType) {
+  if (!promotionType.isNull())
+    return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
+  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
+}
+
+[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
   // If a null pointer expression's type is the C++0x nullptr_t, then
   // it's not necessarily a simple constant and it must be evaluated
   // for its potential side effects.
   return e->getType()->isNullPtrType();
 }
 
+/// If \p e is a widened promoted integer, get its base (unpromoted) type.
+static std::optional<QualType>
+getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
+  const Expr *base = e->IgnoreImpCasts();
+  if (e == base)
+    return std::nullopt;
+
+  QualType baseTy = base->getType();
+  if (!astContext.isPromotableIntegerType(baseTy) ||
+      astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
+    return std::nullopt;
+
+  return baseTy;
+}
+
+/// Check if \p e is a widened promoted integer.
+[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
+                                                const Expr *e) {
+  return getUnwidenedIntegerType(astContext, e).has_value();
+}
+
+/// Check if we can skip the overflow check for \p Op.
+[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
+                                                   const BinOpInfo &op) {
+  assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
+         "Expected a unary or binary operator");
+
+  // If the binop has constant inputs and we can prove there is no overflow,
+  // we can elide the overflow check.
+  if (!op.mayHaveIntegerOverflow())
+    return true;
+
+  // If a unary op has a widened operand, the op cannot overflow.
+  if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
+    return !uo->canOverflow();
+
+  // We usually don't need overflow checks for binops with widened operands.
+  // Multiplication with promoted unsigned operands is a special case.
+  const auto *bo = cast<BinaryOperator>(op.e);
+  std::optional<QualType> optionalLHSTy =
+      getUnwidenedIntegerType(astContext, bo->getLHS());
+  if (!optionalLHSTy)
+    return false;
+
+  std::optional<QualType> optionalRHSTy =
+      getUnwidenedIntegerType(astContext, bo->getRHS());
+  if (!optionalRHSTy)
+    return false;
+
+  QualType lhsTy = *optionalLHSTy;
+  QualType rhsTy = *optionalRHSTy;
+
+  // This is the simple case: binops without unsigned multiplication, and with
+  // widened operands. No overflow check is needed here.
+  if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
+      !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
+    return true;
+
+  // For unsigned multiplication the overflow check can be elided if either one
+  // of the unpromoted types are less than half the size of the promoted type.
+  unsigned promotedSize = astContext.getTypeSize(op.e->getType());
+  return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
+         (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
+}
+
+/// Emit pointer + index arithmetic.
+static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
+                                         const BinOpInfo &op,
+                                         bool isSubtraction) {
+  cgf.cgm.errorNYI(op.loc, "pointer arithmetic");
+  return {};
+}
+
+mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
+  const mlir::Location loc = cgf.getLoc(ops.loc);
+  if (ops.compType->isSignedIntegerOrEnumerationType()) {
+    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
+    case LangOptions::SOB_Defined:
+      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+        return builder.createMul(loc, ops.lhs, ops.rhs);
+      [[fallthrough]];
+    case LangOptions::SOB_Undefined:
+      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
+      [[fallthrough]];
+    case LangOptions::SOB_Trapping:
+      if (canElideOverflowCheck(cgf.getContext(), ops))
+        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
+      cgf.cgm.errorNYI("sanitizers");
+    }
+  }
+  if (ops.fullType->isConstantMatrixType()) {
+    assert(!cir::MissingFeatures::matrixType());
+    cgf.cgm.errorNYI("matrix types");
+    return nullptr;
+  }
+  if (ops.compType->isUnsignedIntegerType() &&
+      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+      !canElideOverflowCheck(cgf.getContext(), ops))
+    cgf.cgm.errorNYI("unsigned int overflow sanitizer");
+
+  if (cir::isFPOrFPVectorTy(ops.lhs.getType())) {
+    assert(!cir::MissingFeatures::cgFPOptionsRAII());
+    return builder.createFMul(loc, ops.lhs, ops.rhs);
+  }
+
+  if (ops.isFixedPointOp()) {
+    assert(!cir::MissingFeatures::fixedPointType());
+    cgf.cgm.errorNYI("fixed point");
+    return nullptr;
+  }
+
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Mul, ops.lhs, ops.rhs);
+}
+mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Div, ops.lhs, ops.rhs);
+}
+mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Rem, ops.lhs, ops.rhs);
+}
+
+mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
+  if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
+      mlir::isa<cir::PointerType>(ops.rhs.getType()))
+    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
+
+  const mlir::Location loc = cgf.getLoc(ops.loc);
+  if (ops.compType->isSignedIntegerOrEnumerationType()) {
+    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
+    case LangOptions::SOB_Defined:
+      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+        return builder.createAdd(loc, ops.lhs, ops.rhs);
+      [[fallthrough]];
+    case LangOptions::SOB_Undefined:
+      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
+      [[fallthrough]];
+    case LangOptions::SOB_Trapping:
+      if (canElideOverflowCheck(cgf.getContext(), ops))
+        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
+      cgf.cgm.errorNYI("sanitizers");
+    }
+  }
+  if (ops.fullType->isConstantMatrixType()) {
+    assert(!cir::MissingFeatures::matrixType());
+    cgf.cgm.errorNYI("matrix types");
+    return nullptr;
+  }
+
+  if (ops.compType->isUnsignedIntegerType() &&
+      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+      !canElideOverflowCheck(cgf.getContext(), ops))
+    cgf.cgm.errorNYI("unsigned int overflow sanitizer");
+
+  if (cir::isFPOrFPVectorTy(ops.lhs.getType())) {
+    assert(!cir::MissingFeatures::cgFPOptionsRAII());
+    return builder.createFAdd(loc, ops.lhs, ops.rhs);
+  }
+
+  if (ops.isFixedPointOp()) {
+    assert(!cir::MissingFeatures::fixedPointType());
+    cgf.cgm.errorNYI("fixed point");
+    return {};
+  }
+
+  return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Add, ops.lhs, ops.rhs);
+}
+
+mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
+  const mlir::Location loc = cgf.getLoc(ops.loc);
+  // The LHS is always a pointer if either side is.
+  if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
+    if (ops.compType->isSignedIntegerOrEnumerationType()) {
+      switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
+      case LangOptions::SOB_Defined: {
+        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+          return builder.createSub(loc, ops.lhs, ops.rhs);
+        [[fallthrough]];
+      }
+      case LangOptions::SOB_Undefined:
+        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
+          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
+        [[fallthrough]];
+      case LangOptions::SOB_Trapping:
+        if (canElideOverflowCheck(cgf.getContext(), ops))
+          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
+        cgf.cgm.errorNYI("sanitizers");
+      }
+    }
+
+    if (ops.fullType->isConstantMatrixType()) {
+      assert(!cir::MissingFeatures::matrixType());
+      cgf.cgm.errorNYI("matrix types");
+      return nullptr;
+    }
+
+    if (ops.compType->isUnsignedIntegerType() &&
+        cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+        !canElideOverflowCheck(cgf.getContext(), ops))
+      cgf.cgm.errorNYI("unsigned int overflow sanitizer");
+
+    if (cir::isFPOrFPVectorTy(ops.lhs.getType())) {
+      assert(!cir::MissingFeatures::cgFPOptionsRAII());
+      return builder.createFSub(loc, ops.lhs, ops.rhs);
+    }
+
+    if (ops.isFixedPointOp()) {
+      assert(!cir::MissingFeatures::fixedPointType());
+      cgf.cgm.errorNYI("fixed point");
+      return {};
+    }
+
+    return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                      cgf.convertType(ops.fullType),
+                                      cir::BinOpKind::Sub, ops.lhs, ops.rhs);
+  }
+
+  // If the RHS is not a pointer, then we have normal pointer
+  // arithmetic.
+  if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
+    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
+
+  // Otherwise, this is a pointer subtraction
+
+  // Do the raw subtraction part.
+  //
+  // TODO(cir): note for LLVM lowering out of this; when expanding this into
+  // LLVM we shall take VLA's, division by element size, etc.
+  //
+  // See more in `EmitSub` in CGExprScalar.cpp.
+  assert(!cir::MissingFeatures::ptrDiffOp());
+  cgf.cgm.errorNYI("ptrdiff");
+  return {};
+}
+
+mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
+  // TODO: This misses out on the sanitizer check below.
+  if (ops.isFixedPointOp()) {
+    assert(cir::MissingFeatures::fixedPointType());
+    cgf.cgm.errorNYI("fixed point");
+    return {};
+  }
+
+  // CIR accepts shift between different types, meaning nothing special
+  // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
+  // promote or truncate the RHS to the same size as the LHS.
+
+  bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
+                            ops.compType->hasSignedIntegerRepresentation() &&
+                            !cgf.getLangOpts().isSignedOverflowDefined() &&
+                            !cgf.getLangOpts().CPlusPlus20;
+  bool sanitizeUnsignedBase =
+      cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
+      ops.compType->hasUnsignedIntegerRepresentation();
+  bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
+  bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
+
+  // OpenCL 6.3j: shift values are effectively % word size of LHS.
+  if (cgf.getLangOpts().OpenCL)
+    cgf.cgm.errorNYI("opencl");
+  else if ((sanitizeBase || sanitizeExponent) &&
+           mlir::isa<cir::IntType>(ops.lhs.getType()))
+    cgf.cgm.errorNYI("sanitizers");
+
+  cgf.cgm.errorNYI("shift ops");
+  return {};
+}
+
+mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
+  // TODO: This misses out on the sanitizer check below.
+  if (ops.isFixedPointOp()) {
+    assert(cir::MissingFeatures::fixedPointType());
+    cgf.cgm.errorNYI("fixed point");
+    return {};
+  }
+
+  // CIR accepts shift between different types, meaning nothing special
+  // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
+  // promote or truncate the RHS to the same size as the LHS.
+
+  // OpenCL 6.3j: shift values are effectively % word size of LHS.
+  if (cgf.getLangOpts().OpenCL)
+    cgf.cgm.errorNYI("opencl");
+  else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
+           mlir::isa<cir::IntType>(ops.lhs.getType()))
+    cgf.cgm.errorNYI("sanitizers");
+
+  // Note that we don't need to distinguish unsigned treatment at this
+  // point since it will be handled later by LLVM lowering.
+  cgf.cgm.errorNYI("shift ops");
+  return {};
+}
+
+mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::And, ops.lhs, ops.rhs);
+}
+mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Xor, ops.lhs, ops.rhs);
+}
+mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
+  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
+                                    cgf.convertType(ops.fullType),
+                                    cir::BinOpKind::Or, ops.lhs, ops.rhs);
+}
+
 // Emit code for an explicit or implicit cast.  Implicit
 // casts have to handle a more broad range of conversions than explicit
 // casts, as they handle things like function to ptr-to-function decay
@@ -661,7 +1267,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
   }
 
   case CK_NullToPointer: {
-    if (MustVisitNullValue(subExpr))
+    if (mustVisitNullValue(subExpr))
       cgf.emitIgnoredExpr(subExpr);
 
     // Note that DestTy is used as the MLIR type instead of a custom
@@ -790,9 +1396,9 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                cgf.cgm.UInt64Ty, e->EvaluateKnownConstInt(cgf.getContext())));
 }
 
-mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *E,
-                                                    LValue LV, bool isInc,
+mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
+                                                    LValue lv, bool isInc,
                                                     bool isPre) {
   return ScalarExprEmitter(*this, builder)
-      .emitScalarPrePostIncDec(E, LV, isInc, isPre);
+      .emitScalarPrePostIncDec(e, lv, isInc, isPre);
 }

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 16547f2401292..4ba3d416007f2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -444,6 +444,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
     return LValue();
   case Expr::UnaryOperatorClass:
     return emitUnaryOpLValue(cast<UnaryOperator>(e));
+  case Expr::BinaryOperatorClass:
+    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
   case Expr::DeclRefExprClass:
     return emitDeclRefLValue(cast<DeclRefExpr>(e));
   }

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 631217cf67762..7d1fa0712c7ac 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -29,8 +29,6 @@
 #include "clang/CIR/MissingFeatures.h"
 #include "clang/CIR/TypeEvaluationKind.h"
 
-#include "llvm/ADT/ScopedHashTable.h"
-
 namespace {
 class ScalarExprEmitter;
 } // namespace
@@ -62,7 +60,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   using DeclMapTy = llvm::DenseMap<const clang::Decl *, Address>;
   /// This keeps track of the CIR allocas or globals for local C
   /// declarations.
-  DeclMapTy LocalDeclMap;
+  DeclMapTy localDeclMap;
 
   clang::ASTContext &getContext() const { return cgm.getASTContext(); }
 
@@ -80,11 +78,11 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// this fuction. These can potentially set the return value.
   bool sawAsmBlock = false;
 
-  mlir::Type convertTypeForMem(QualType T);
+  mlir::Type convertTypeForMem(QualType t);
 
-  mlir::Type convertType(clang::QualType T);
-  mlir::Type convertType(const TypeDecl *T) {
-    return convertType(getContext().getTypeDeclType(T));
+  mlir::Type convertType(clang::QualType t);
+  mlir::Type convertType(const TypeDecl *t) {
+    return convertType(getContext().getTypeDeclType(t));
   }
 
   ///  Return the cir::TypeEvaluationKind of QualType \c type.
@@ -224,6 +222,7 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
   LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
+  LValue emitBinaryOperatorLValue(const BinaryOperator *e);
 
   /// Determine whether the given initializer is trivial in the sense
   /// that it requires no code to be generated.
@@ -322,8 +321,8 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   /// Set the address of a local variable.
   void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr) {
-    assert(!LocalDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
-    LocalDeclMap.insert({vd, addr});
+    assert(!localDeclMap.count(vd) && "Decl already exists in LocalDeclMap!");
+    localDeclMap.insert({vd, addr});
     // TODO: Add symbol table support
   }
 
@@ -332,6 +331,7 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   /// Emit the computation of the specified expression of scalar type.
   mlir::Value emitScalarExpr(const clang::Expr *e);
+  mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
   cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                            cir::FuncType funcType);
 
@@ -341,7 +341,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emit code for the start of a function.
   /// \param loc       The location to be associated with the function.
   /// \param startLoc  The location of the function body.
-  void startFunction(clang::GlobalDecl gd, clang::QualType retTy,
+  void startFunction(clang::GlobalDecl gd, clang::QualType returnType,
                      cir::FuncOp fn, cir::FuncType funcType,
                      FunctionArgList args, clang::SourceLocation loc,
                      clang::SourceLocation startLoc);

diff  --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index ae86fefcf3657..cdcfa77b66379 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -75,8 +75,8 @@ void cir::CIRDialect::initialize() {
 
 // Check if a region's termination omission is valid and, if so, creates and
 // inserts the omitted terminator into the region.
-LogicalResult ensureRegionTerm(OpAsmParser &parser, Region &region,
-                               SMLoc errLoc) {
+static LogicalResult ensureRegionTerm(OpAsmParser &parser, Region &region,
+                                      SMLoc errLoc) {
   Location eLoc = parser.getEncodedSourceLoc(parser.getCurrentLocation());
   OpBuilder builder(parser.getBuilder().getContext());
 
@@ -102,7 +102,7 @@ LogicalResult ensureRegionTerm(OpAsmParser &parser, Region &region,
 }
 
 // True if the region's terminator should be omitted.
-bool omitRegionTerm(mlir::Region &r) {
+static bool omitRegionTerm(mlir::Region &r) {
   const auto singleNonEmptyBlock = r.hasOneBlock() && !r.back().empty();
   const auto yieldsNothing = [&r]() {
     auto y = dyn_cast<cir::YieldOp>(r.back().getTerminator());
@@ -346,9 +346,9 @@ LogicalResult cir::CastOp::verify() {
      return emitOpError() << "requires two types differ in addrspace only";
     return success();
   }
+  default:
+    llvm_unreachable("Unknown CastOp kind?");
   }
-
-  llvm_unreachable("Unknown CastOp kind?");
 }
 
 static bool isIntOrBoolCast(cir::CastOp op) {
@@ -728,6 +728,37 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
 // been implemented yet.
 mlir::LogicalResult cir::FuncOp::verify() { return success(); }
 
+LogicalResult cir::BinOp::verify() {
+  bool noWrap = getNoUnsignedWrap() || getNoSignedWrap();
+  bool saturated = getSaturated();
+
+  if (!isa<cir::IntType>(getType()) && noWrap)
+    return emitError()
+           << "only operations on integer values may have nsw/nuw flags";
+
+  bool noWrapOps = getKind() == cir::BinOpKind::Add ||
+                   getKind() == cir::BinOpKind::Sub ||
+                   getKind() == cir::BinOpKind::Mul;
+
+  bool saturatedOps =
+      getKind() == cir::BinOpKind::Add || getKind() == cir::BinOpKind::Sub;
+
+  if (noWrap && !noWrapOps)
+    return emitError() << "The nsw/nuw flags are applicable to opcodes: 'add', "
+                          "'sub' and 'mul'";
+  if (saturated && !saturatedOps)
+    return emitError() << "The saturated flag is applicable to opcodes: 'add' "
+                          "and 'sub'";
+  if (noWrap && saturated)
+    return emitError() << "The nsw/nuw flags and the saturated flag are "
+                          "mutually exclusive";
+
+  assert(!cir::MissingFeatures::complexType());
+  // TODO(cir): verify for complex binops
+
+  return mlir::success();
+}
+
 //===----------------------------------------------------------------------===//
 // UnaryOp
 //===----------------------------------------------------------------------===//

diff  --git a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp
index 6291297492227..356f7f6244db8 100644
--- a/clang/lib/CIR/Dialect/IR/CIRTypes.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRTypes.cpp
@@ -14,6 +14,7 @@
 
 #include "mlir/IR/DialectImplementation.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/MissingFeatures.h"
 #include "llvm/ADT/TypeSwitch.h"
 
 //===----------------------------------------------------------------------===//
@@ -274,6 +275,15 @@ bool cir::isAnyFloatingPointType(mlir::Type t) {
              cir::FP80Type, cir::BF16Type, cir::FP16Type, cir::FP128Type>(t);
 }
 
+//===----------------------------------------------------------------------===//
+// Floating-point and Float-point Vector type helpers
+//===----------------------------------------------------------------------===//
+
+bool cir::isFPOrFPVectorTy(mlir::Type t) {
+  assert(!cir::MissingFeatures::vectorType());
+  return isAnyFloatingPointType(t);
+}
+
 //===----------------------------------------------------------------------===//
 // FuncType Definitions
 //===----------------------------------------------------------------------===//

diff  --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 30cbee48b4bdc..a16840cc6bfef 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -32,7 +32,6 @@
 #include "clang/CIR/Passes.h"
 #include "llvm/ADT/TypeSwitch.h"
 #include "llvm/IR/Module.h"
-#include "llvm/Support/Error.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TimeProfiler.h"
 
@@ -85,12 +84,11 @@ static mlir::Value createIntCast(mlir::OpBuilder &bld, mlir::Value src,
 
   if (dstWidth > srcWidth && isSigned)
     return bld.create<mlir::LLVM::SExtOp>(loc, dstTy, src);
-  else if (dstWidth > srcWidth)
+  if (dstWidth > srcWidth)
     return bld.create<mlir::LLVM::ZExtOp>(loc, dstTy, src);
-  else if (dstWidth < srcWidth)
+  if (dstWidth < srcWidth)
     return bld.create<mlir::LLVM::TruncOp>(loc, dstTy, src);
-  else
-    return bld.create<mlir::LLVM::BitcastOp>(loc, dstTy, src);
+  return bld.create<mlir::LLVM::BitcastOp>(loc, dstTy, src);
 }
 
 /// Emits the value from memory as expected by its users. Should be called when
@@ -994,6 +992,131 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
                         << elementType;
 }
 
+mlir::LLVM::IntegerOverflowFlags
+CIRToLLVMBinOpLowering::getIntOverflowFlag(cir::BinOp op) const {
+  if (op.getNoUnsignedWrap())
+    return mlir::LLVM::IntegerOverflowFlags::nuw;
+
+  if (op.getNoSignedWrap())
+    return mlir::LLVM::IntegerOverflowFlags::nsw;
+
+  return mlir::LLVM::IntegerOverflowFlags::none;
+}
+
+static bool isIntTypeUnsigned(mlir::Type type) {
+  // TODO: Ideally, we should only need to check cir::IntType here.
+  return mlir::isa<cir::IntType>(type)
+             ? mlir::cast<cir::IntType>(type).isUnsigned()
+             : mlir::cast<mlir::IntegerType>(type).isUnsigned();
+}
+
+mlir::LogicalResult CIRToLLVMBinOpLowering::matchAndRewrite(
+    cir::BinOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  if (adaptor.getLhs().getType() != adaptor.getRhs().getType())
+    return op.emitError() << "inconsistent operands' types not supported yet";
+
+  mlir::Type type = op.getRhs().getType();
+  assert(!cir::MissingFeatures::vectorType());
+  if (!mlir::isa<cir::IntType, cir::BoolType, cir::CIRFPTypeInterface,
+                 mlir::IntegerType>(type))
+    return op.emitError() << "operand type not supported yet";
+
+  auto llvmTy = getTypeConverter()->convertType(op.getType());
+  mlir::Type llvmEltTy =
+      mlir::isa<mlir::VectorType>(llvmTy)
+          ? mlir::cast<mlir::VectorType>(llvmTy).getElementType()
+          : llvmTy;
+  auto rhs = adaptor.getRhs();
+  auto lhs = adaptor.getLhs();
+
+  type = elementTypeIfVector(type);
+
+  switch (op.getKind()) {
+  case cir::BinOpKind::Add:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy)) {
+      if (op.getSaturated()) {
+        if (isIntTypeUnsigned(type)) {
+          rewriter.replaceOpWithNewOp<mlir::LLVM::UAddSat>(op, lhs, rhs);
+          break;
+        }
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SAddSat>(op, lhs, rhs);
+        break;
+      }
+      rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(op, llvmTy, lhs, rhs,
+                                                     getIntOverflowFlag(op));
+    } else {
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, lhs, rhs);
+    }
+    break;
+  case cir::BinOpKind::Sub:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy)) {
+      if (op.getSaturated()) {
+        if (isIntTypeUnsigned(type)) {
+          rewriter.replaceOpWithNewOp<mlir::LLVM::USubSat>(op, lhs, rhs);
+          break;
+        }
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SSubSat>(op, lhs, rhs);
+        break;
+      }
+      rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(op, llvmTy, lhs, rhs,
+                                                     getIntOverflowFlag(op));
+    } else {
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FSubOp>(op, lhs, rhs);
+    }
+    break;
+  case cir::BinOpKind::Mul:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy))
+      rewriter.replaceOpWithNewOp<mlir::LLVM::MulOp>(op, llvmTy, lhs, rhs,
+                                                     getIntOverflowFlag(op));
+    else
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FMulOp>(op, lhs, rhs);
+    break;
+  case cir::BinOpKind::Div:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy)) {
+      auto isUnsigned = isIntTypeUnsigned(type);
+      if (isUnsigned)
+        rewriter.replaceOpWithNewOp<mlir::LLVM::UDivOp>(op, lhs, rhs);
+      else
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SDivOp>(op, lhs, rhs);
+    } else {
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FDivOp>(op, lhs, rhs);
+    }
+    break;
+  case cir::BinOpKind::Rem:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy)) {
+      auto isUnsigned = isIntTypeUnsigned(type);
+      if (isUnsigned)
+        rewriter.replaceOpWithNewOp<mlir::LLVM::URemOp>(op, lhs, rhs);
+      else
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SRemOp>(op, lhs, rhs);
+    } else {
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FRemOp>(op, lhs, rhs);
+    }
+    break;
+  case cir::BinOpKind::And:
+    rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(op, lhs, rhs);
+    break;
+  case cir::BinOpKind::Or:
+    rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(op, lhs, rhs);
+    break;
+  case cir::BinOpKind::Xor:
+    rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, lhs, rhs);
+    break;
+  case cir::BinOpKind::Max:
+    if (mlir::isa<mlir::IntegerType>(llvmEltTy)) {
+      auto isUnsigned = isIntTypeUnsigned(type);
+      if (isUnsigned)
+        rewriter.replaceOpWithNewOp<mlir::LLVM::UMaxOp>(op, llvmTy, lhs, rhs);
+      else
+        rewriter.replaceOpWithNewOp<mlir::LLVM::SMaxOp>(op, llvmTy, lhs, rhs);
+    }
+    break;
+  }
+
+  return mlir::LogicalResult::success();
+}
+
 static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
                                  mlir::DataLayout &dataLayout) {
   converter.addConversion([&](cir::PointerType type) -> mlir::Type {
@@ -1132,6 +1255,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
                                             dl);
   patterns.add<
       // clang-format off
+               CIRToLLVMBinOpLowering,
                CIRToLLVMBrCondOpLowering,
                CIRToLLVMBrOpLowering,
                CIRToLLVMFuncOpLowering,

diff  --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index a01a9a5f4f076..ef0bb2deaccdf 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -178,6 +178,17 @@ class CIRToLLVMUnaryOpLowering
                   mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMBinOpLowering : public mlir::OpConversionPattern<cir::BinOp> {
+  mlir::LLVM::IntegerOverflowFlags getIntOverflowFlag(cir::BinOp op) const;
+
+public:
+  using mlir::OpConversionPattern<cir::BinOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::BinOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 class CIRToLLVMBrOpLowering : public mlir::OpConversionPattern<cir::BrOp> {
 public:
   using mlir::OpConversionPattern<cir::BrOp>::OpConversionPattern;

diff  --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp
new file mode 100644
index 0000000000000..4c20f79600fac
--- /dev/null
+++ b/clang/test/CIR/CodeGen/binop.cpp
@@ -0,0 +1,33 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -O1 -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+
+void b0(int a, int b) {
+  int x = a * b;
+  x = x / b;
+  x = x % b;
+  x = x + b;
+  x = x - b;
+  x = x & b;
+  x = x ^ b;
+  x = x | b;
+}
+
+// CHECK: %{{.+}} = cir.binop(mul, %{{.+}}, %{{.+}}) nsw : !s32i
+// CHECK: %{{.+}} = cir.binop(div, %{{.+}}, %{{.+}}) : !s32i
+// CHECK: %{{.+}} = cir.binop(rem, %{{.+}}, %{{.+}}) : !s32i
+// CHECK: %{{.+}} = cir.binop(add, %{{.+}}, %{{.+}}) nsw : !s32i
+// CHECK: %{{.+}} = cir.binop(sub, %{{.+}}, %{{.+}}) nsw : !s32i
+// CHECK: %{{.+}} = cir.binop(and, %{{.+}}, %{{.+}}) : !s32i
+// CHECK: %{{.+}} = cir.binop(xor, %{{.+}}, %{{.+}}) : !s32i
+// CHECK: %{{.+}} = cir.binop(or, %{{.+}}, %{{.+}}) : !s32i
+
+void testFloatingPointBinOps(float a, float b) {
+  a * b;
+  // CHECK: cir.binop(mul, %{{.+}}, %{{.+}}) : !cir.float
+  a / b;
+  // CHECK: cir.binop(div, %{{.+}}, %{{.+}}) : !cir.float
+  a + b;
+  // CHECK: cir.binop(add, %{{.+}}, %{{.+}}) : !cir.float
+  a - b;
+  // CHECK: cir.binop(sub, %{{.+}}, %{{.+}}) : !cir.float
+}

diff  --git a/clang/test/CIR/Lowering/binop-bool.cir b/clang/test/CIR/Lowering/binop-bool.cir
new file mode 100644
index 0000000000000..7267c407cc0a7
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-bool.cir
@@ -0,0 +1,18 @@
+// RUN: cir-opt %s -cir-to-llvm -o %t.mlir
+// RUN: FileCheck --input-file=%t.mlir %s
+
+module {
+  cir.func @foo() {
+    %0 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["a", init] {alignment = 4 : i64}
+    %1 = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["b", init] {alignment = 4 : i64}
+    %2 = cir.load %0 : !cir.ptr<!cir.bool>, !cir.bool
+    %3 = cir.load %1 : !cir.ptr<!cir.bool>, !cir.bool
+    %4 = cir.binop(or, %2, %3) : !cir.bool
+    // CHECK: = llvm.or {{.*}}, {{.*}} : i1
+    %5 = cir.binop(xor, %2, %3) : !cir.bool
+    // CHECK: = llvm.xor {{.*}}, {{.*}} : i1
+    %6 = cir.binop(and, %2, %3) : !cir.bool
+    // CHECK: = llvm.and {{.*}}, {{.*}} : i1
+    cir.return
+  }
+}

diff  --git a/clang/test/CIR/Lowering/binop-fp.cir b/clang/test/CIR/Lowering/binop-fp.cir
new file mode 100644
index 0000000000000..e69a69e6b0991
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-fp.cir
@@ -0,0 +1,68 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering  | FileCheck %s -check-prefix=LLVM
+
+module {
+  cir.func @foo() {
+    %0 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["c"] {alignment = 4 : i64}
+    %1 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["d"] {alignment = 4 : i64}
+    %2 = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["y", init] {alignment = 4 : i64}
+    %3 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["e"] {alignment = 8 : i64}
+    %4 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["f"] {alignment = 8 : i64}
+    %5 = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["g", init] {alignment = 8 : i64}
+    %6 = cir.load %0 : !cir.ptr<!cir.float>, !cir.float
+    %7 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float
+    %8 = cir.binop(mul, %6, %7) : !cir.float
+    cir.store %8, %2 : !cir.float, !cir.ptr<!cir.float>
+    %9 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float
+    %10 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float
+    %11 = cir.binop(div, %9, %10) : !cir.float
+    cir.store %11, %2 : !cir.float, !cir.ptr<!cir.float>
+    %12 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float
+    %13 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float
+    %14 = cir.binop(add, %12, %13) : !cir.float
+    cir.store %14, %2 : !cir.float, !cir.ptr<!cir.float>
+    %15 = cir.load %2 : !cir.ptr<!cir.float>, !cir.float
+    %16 = cir.load %1 : !cir.ptr<!cir.float>, !cir.float
+    %17 = cir.binop(sub, %15, %16) : !cir.float
+    cir.store %17, %2 : !cir.float, !cir.ptr<!cir.float>
+    %18 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double
+    %19 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double
+    %20 = cir.binop(add, %18, %19) : !cir.double
+    cir.store %20, %5 : !cir.double, !cir.ptr<!cir.double>
+    %21 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double
+    %22 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double
+    %23 = cir.binop(sub, %21, %22) : !cir.double
+    cir.store %23, %5 : !cir.double, !cir.ptr<!cir.double>
+    %24 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double
+    %25 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double
+    %26 = cir.binop(mul, %24, %25) : !cir.double
+    cir.store %26, %5 : !cir.double, !cir.ptr<!cir.double>
+    %27 = cir.load %3 : !cir.ptr<!cir.double>, !cir.double
+    %28 = cir.load %4 : !cir.ptr<!cir.double>, !cir.double
+    %29 = cir.binop(div, %27, %28) : !cir.double
+    cir.store %29, %5 : !cir.double, !cir.ptr<!cir.double>
+    cir.return
+  }
+}
+
+// MLIR: = llvm.alloca {{.*}} f32 {alignment = 4 : i64} : (i64) -> !llvm.ptr
+// MLIR: = llvm.alloca {{.*}} f64 {alignment = 8 : i64} : (i64) -> !llvm.ptr
+// MLIR: = llvm.fmul {{.*}} : f32
+// MLIR: = llvm.fdiv
+// MLIR: = llvm.fadd
+// MLIR: = llvm.fsub
+// MLIR: = llvm.fadd {{.*}} : f64
+// MLIR: = llvm.fsub
+// MLIR: = llvm.fmul
+// MLIR: = llvm.fdiv
+
+// LLVM: = alloca float, i64
+// LLVM: = alloca double, i64
+// LLVM: = fmul float
+// LLVM: = fdiv float
+// LLVM: = fadd float
+// LLVM: = fsub float
+// LLVM: = fadd double
+// LLVM: = fsub double
+// LLVM: = fmul double
+// LLVM: = fdiv double

diff  --git a/clang/test/CIR/Lowering/binop-overflow.cir b/clang/test/CIR/Lowering/binop-overflow.cir
new file mode 100644
index 0000000000000..68af70aa6abb6
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-overflow.cir
@@ -0,0 +1,63 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering -o -  | FileCheck %s -check-prefix=LLVM
+
+!u32i = !cir.int<u, 32>
+!s32i = !cir.int<s, 32>
+
+module {
+  cir.func @test_add_u32_u32_u32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!u32i>) -> !cir.bool {
+    %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!u32i, !cir.bool)
+    cir.store %result, %res : !u32i, !cir.ptr<!u32i>
+    cir.return %overflow : !cir.bool
+  }
+
+  //      MLIR: llvm.func @test_add_u32_u32_u32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1
+  // MLIR-NEXT:   %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.uadd.with.overflow.i32"(%[[LHS]], %[[RHS]]) : (i32, i32) -> !llvm.struct<(i32, i1)>
+  // MLIR-NEXT:   %[[#RES:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i32, i1)>
+  // MLIR-NEXT:   %[[#OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i32, i1)>
+  // MLIR-NEXT:   llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr
+  // MLIR-NEXT:   llvm.return %[[#OVFL]] : i1
+  // MLIR-NEXT: }
+
+  //      LLVM: define i1 @test_add_u32_u32_u32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]])
+  // LLVM-NEXT:   %[[#INTRIN_RET:]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %[[#LHS]], i32 %[[#RHS]])
+  // LLVM-NEXT:   %[[#RES:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 0
+  // LLVM-NEXT:   %[[#OVFL:]] = extractvalue { i32, i1 } %[[#INTRIN_RET]], 1
+  // LLVM-NEXT:   store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4
+  // LLVM-NEXT:   ret i1 %[[#OVFL]]
+  // LLVM-NEXT: }
+
+  cir.func @test_add_u32_u32_i32(%lhs: !u32i, %rhs: !u32i, %res: !cir.ptr<!s32i>) -> !cir.bool {
+    %result, %overflow = cir.binop.overflow(add, %lhs, %rhs) : !u32i, (!s32i, !cir.bool)
+    cir.store %result, %res : !s32i, !cir.ptr<!s32i>
+    cir.return %overflow : !cir.bool
+  }
+
+  //      MLIR: llvm.func @test_add_u32_u32_i32(%[[LHS:.+]]: i32, %[[RHS:.+]]: i32, %[[RES_PTR:.+]]: !llvm.ptr) -> i1
+  // MLIR-NEXT:   %[[#LHS_EXT:]] = llvm.zext %[[LHS]] : i32 to i33
+  // MLIR-NEXT:   %[[#RHS_EXT:]] = llvm.zext %[[RHS]] : i32 to i33
+  // MLIR-NEXT:   %[[#INTRIN_RET:]] = llvm.call_intrinsic "llvm.sadd.with.overflow.i33"(%[[#LHS_EXT]], %[[#RHS_EXT]]) : (i33, i33) -> !llvm.struct<(i33, i1)>
+  // MLIR-NEXT:   %[[#RES_EXT:]] = llvm.extractvalue %[[#INTRIN_RET]][0] : !llvm.struct<(i33, i1)>
+  // MLIR-NEXT:   %[[#ARITH_OVFL:]] = llvm.extractvalue %[[#INTRIN_RET]][1] : !llvm.struct<(i33, i1)>
+  // MLIR-NEXT:   %[[#RES:]] = llvm.trunc %[[#RES_EXT]] : i33 to i32
+  // MLIR-NEXT:   %[[#RES_EXT_2:]] = llvm.sext %[[#RES]] : i32 to i33
+  // MLIR-NEXT:   %[[#TRUNC_OVFL:]] = llvm.icmp "ne" %[[#RES_EXT_2]], %[[#RES_EXT]] : i33
+  // MLIR-NEXT:   %[[#OVFL:]] = llvm.or %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]]  : i1
+  // MLIR-NEXT:   llvm.store %[[#RES]], %[[RES_PTR]] {{.*}} : i32, !llvm.ptr
+  // MLIR-NEXT:   llvm.return %[[#OVFL]] : i1
+  // MLIR-NEXT: }
+
+  //      LLVM: define i1 @test_add_u32_u32_i32(i32 %[[#LHS:]], i32 %[[#RHS:]], ptr %[[#RES_PTR:]])
+  // LLVM-NEXT:   %[[#LHS_EXT:]] = zext i32 %[[#LHS]] to i33
+  // LLVM-NEXT:   %[[#RHS_EXT:]] = zext i32 %[[#RHS]] to i33
+  // LLVM-NEXT:   %[[#INTRIN_RET:]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 %[[#LHS_EXT]], i33 %[[#RHS_EXT]])
+  // LLVM-NEXT:   %[[#RES_EXT:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 0
+  // LLVM-NEXT:   %[[#ARITH_OVFL:]] = extractvalue { i33, i1 } %[[#INTRIN_RET]], 1
+  // LLVM-NEXT:   %[[#RES:]] = trunc i33 %[[#RES_EXT]] to i32
+  // LLVM-NEXT:   %[[#RES_EXT_2:]] = sext i32 %[[#RES]] to i33
+  // LLVM-NEXT:   %[[#TRUNC_OVFL:]] = icmp ne i33 %[[#RES_EXT_2]], %[[#RES_EXT]]
+  // LLVM-NEXT:   %[[#OVFL:]] = or i1 %[[#ARITH_OVFL]], %[[#TRUNC_OVFL]]
+  // LLVM-NEXT:   store i32 %[[#RES]], ptr %[[#RES_PTR]], align 4
+  // LLVM-NEXT:   ret i1 %[[#OVFL]]
+  // LLVM-NEXT: }
+}

diff  --git a/clang/test/CIR/Lowering/binop-signed-int.cir b/clang/test/CIR/Lowering/binop-signed-int.cir
new file mode 100644
index 0000000000000..17597f080cd44
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-signed-int.cir
@@ -0,0 +1,60 @@
+// RUN: cir-opt %s -cir-to-llvm -o %t.mlir
+// RUN: FileCheck --input-file=%t.mlir %s
+
+!s32i = !cir.int<s, 32>
+module {
+  cir.func @foo() {
+    %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init] {alignment = 4 : i64}
+    %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init] {alignment = 4 : i64}
+    %2 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+    %3 = cir.const #cir.int<2> : !s32i    cir.store %3, %0 : !s32i, !cir.ptr<!s32i>
+    %4 = cir.const #cir.int<1> : !s32i    cir.store %4, %1 : !s32i, !cir.ptr<!s32i>
+    %5 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+    %6 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %7 = cir.binop(mul, %5, %6) : !s32i
+    // CHECK: = llvm.mul
+    cir.store %7, %2 : !s32i, !cir.ptr<!s32i>
+    %8 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %9 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %10 = cir.binop(div, %8, %9) : !s32i
+      // CHECK: = llvm.sdiv
+    cir.store %10, %2 : !s32i, !cir.ptr<!s32i>
+    %11 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %12 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %13 = cir.binop(rem, %11, %12) : !s32i
+    // CHECK: = llvm.srem
+    cir.store %13, %2 : !s32i, !cir.ptr<!s32i>
+    %14 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %15 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %16 = cir.binop(add, %14, %15) : !s32i
+    // CHECK: = llvm.add
+    cir.store %16, %2 : !s32i, !cir.ptr<!s32i>
+    %17 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %18 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %19 = cir.binop(sub, %17, %18) : !s32i
+    // CHECK: = llvm.sub
+    cir.store %19, %2 : !s32i, !cir.ptr<!s32i>
+    %26 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %27 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %28 = cir.binop(and, %26, %27) : !s32i
+    // CHECK: = llvm.and
+    cir.store %28, %2 : !s32i, !cir.ptr<!s32i>
+    %29 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %30 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %31 = cir.binop(xor, %29, %30) : !s32i
+    // CHECK: = llvm.xor
+    cir.store %31, %2 : !s32i, !cir.ptr<!s32i>
+    %32 = cir.load %2 : !cir.ptr<!s32i>, !s32i
+    %33 = cir.load %1 : !cir.ptr<!s32i>, !s32i
+    %34 = cir.binop(or, %32, %33) : !s32i
+    // CHECK: = llvm.or
+    %35 = cir.binop(add, %32, %33) sat: !s32i
+    // CHECK: = llvm.intr.sadd.sat{{.*}}(i32, i32) -> i32
+    %36 = cir.binop(sub, %32, %33) sat: !s32i
+    // CHECK: = llvm.intr.ssub.sat{{.*}}(i32, i32) -> i32 
+    cir.store %34, %2 : !s32i, !cir.ptr<!s32i>
+    %37 = cir.binop(max, %32, %33) : !s32i
+    // CHECK: = llvm.intr.smax
+    cir.return
+  }
+}

diff  --git a/clang/test/CIR/Lowering/binop-unsigned-int.cir b/clang/test/CIR/Lowering/binop-unsigned-int.cir
new file mode 100644
index 0000000000000..46c62b339f2ed
--- /dev/null
+++ b/clang/test/CIR/Lowering/binop-unsigned-int.cir
@@ -0,0 +1,73 @@
+// RUN: cir-opt %s -cir-to-llvm -o - | FileCheck %s -check-prefix=MLIR
+// RUN: cir-translate %s -cir-to-llvmir --target x86_64-unknown-linux-gnu --disable-cc-lowering  | FileCheck %s -check-prefix=LLVM
+!u32i = !cir.int<u, 32>
+
+module {
+  cir.func @foo() {
+    %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init] {alignment = 4 : i64}
+    %1 = cir.alloca !u32i, !cir.ptr<!u32i>, ["b", init] {alignment = 4 : i64}
+    %2 = cir.alloca !u32i, !cir.ptr<!u32i>, ["x", init] {alignment = 4 : i64}
+    %3 = cir.const #cir.int<2> : !u32i    cir.store %3, %0 : !u32i, !cir.ptr<!u32i>
+    %4 = cir.const #cir.int<1> : !u32i    cir.store %4, %1 : !u32i, !cir.ptr<!u32i>
+    %5 = cir.load %0 : !cir.ptr<!u32i>, !u32i
+    %6 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %7 = cir.binop(mul, %5, %6) : !u32i
+    cir.store %7, %2 : !u32i, !cir.ptr<!u32i>
+    %8 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %9 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %10 = cir.binop(div, %8, %9) : !u32i
+    cir.store %10, %2 : !u32i, !cir.ptr<!u32i>
+    %11 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %12 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %13 = cir.binop(rem, %11, %12) : !u32i
+    cir.store %13, %2 : !u32i, !cir.ptr<!u32i>
+    %14 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %15 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %16 = cir.binop(add, %14, %15) : !u32i
+    cir.store %16, %2 : !u32i, !cir.ptr<!u32i>
+    %17 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %18 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %19 = cir.binop(sub, %17, %18) : !u32i
+    cir.store %19, %2 : !u32i, !cir.ptr<!u32i>
+    %26 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %27 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %28 = cir.binop(and, %26, %27) : !u32i
+    cir.store %28, %2 : !u32i, !cir.ptr<!u32i>
+    %29 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %30 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %31 = cir.binop(xor, %29, %30) : !u32i
+    cir.store %31, %2 : !u32i, !cir.ptr<!u32i>
+    %32 = cir.load %2 : !cir.ptr<!u32i>, !u32i
+    %33 = cir.load %1 : !cir.ptr<!u32i>, !u32i
+    %34 = cir.binop(or, %32, %33) : !u32i
+    cir.store %34, %2 : !u32i, !cir.ptr<!u32i>
+    %35 = cir.binop(add, %32, %33) sat: !u32i
+    %36 = cir.binop(sub, %32, %33) sat: !u32i  
+    %37 = cir.binop(max, %32, %33) : !u32i
+    cir.return
+  }
+}
+
+// MLIR: = llvm.mul
+// MLIR: = llvm.udiv
+// MLIR: = llvm.urem
+// MLIR: = llvm.add
+// MLIR: = llvm.sub
+// MLIR: = llvm.and
+// MLIR: = llvm.xor
+// MLIR: = llvm.or
+// MLIR: = llvm.intr.uadd.sat{{.*}}(i32, i32) -> i32
+// MLIR: = llvm.intr.usub.sat{{.*}}(i32, i32) -> i32 
+// MLIR: = llvm.intr.umax
+
+// LLVM: = mul i32
+// LLVM: = udiv i32
+// LLVM: = urem i32
+// LLVM: = add i32
+// LLVM: = sub i32
+// LLVM: = and i32
+// LLVM: = xor i32
+// LLVM: = or i32
+// LLVM: = call i32 @llvm.uadd.sat.i32
+// LLVM: = call i32 @llvm.usub.sat.i32
+// LLVM: = call i32 @llvm.umax.i32


        


More information about the cfe-commits mailing list