[clang] [clang] constexpr `__builtin_elementwise_abs` support (PR #152497)

via cfe-commits cfe-commits at lists.llvm.org
Wed Aug 13 20:58:57 PDT 2025


https://github.com/Mr-Anyone updated https://github.com/llvm/llvm-project/pull/152497

>From b48993d25136a769b3b7bf8e8730be1a92ace067 Mon Sep 17 00:00:00 2001
From: Vincent <llvm at viceroygroup.ca>
Date: Thu, 7 Aug 2025 00:24:55 +0800
Subject: [PATCH 1/2] [clang] constexpr __builtin_elementwise_abs support

Added constant evaluation of `__builtin_elementwise_abs`
on integer and vector types.

fixes #152276
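
For example (a minimal sketch mirroring the new tests; vector4char is the
char-vector typedef already used in clang/test/Sema/constant-builtins-vector.cpp):

    static_assert(__builtin_elementwise_abs(-10) == 10);
    static_assert(__builtin_elementwise_abs(10) == 10);
    // Element-wise over a vector; the added test checks the result through
    // __builtin_bit_cast because the byte layout is endian-dependent.
    constexpr vector4char v =
        __builtin_elementwise_abs((vector4char){-1, -2, -3, 4});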
---
 clang/docs/LanguageExtensions.rst             |  3 +-
 clang/docs/ReleaseNotes.rst                   |  2 +
 clang/include/clang/Basic/Builtins.td         |  2 +-
 clang/lib/AST/ByteCode/InterpBuiltin.cpp      | 46 +++++++++++++++++++
 clang/lib/AST/ExprConstant.cpp                | 16 +++++++
 .../test/CodeGen/builtins-elementwise-math.c  |  2 +-
 clang/test/Sema/constant-builtins-vector.cpp  |  6 +++
 7 files changed, 74 insertions(+), 3 deletions(-)

diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index eef3d0c4ccb9d..2db1bae918ada 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -760,7 +760,8 @@ Unless specified otherwise operation(±0) = ±0 and operation(±infinity) = ±in
 The integer elementwise intrinsics, including ``__builtin_elementwise_popcount``,
 ``__builtin_elementwise_bitreverse``, ``__builtin_elementwise_add_sat``,
 ``__builtin_elementwise_sub_sat``, ``__builtin_elementwise_max``,
-``__builtin_elementwise_min`` can be called in a ``constexpr`` context.
+``__builtin_elementwise_min``, and ``__builtin_elementwise_abs``
+can be called in a ``constexpr`` context.
 
 No implicit promotion of integer types takes place. The mixing of integer types
 of different sizes and signs is forbidden in binary and ternary builtins.
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index af576f817700a..b5e2f81bc0527 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -115,6 +115,8 @@ Non-comprehensive list of changes in this release
 -------------------------------------------------
 - Added ``__builtin_elementwise_fshl`` and ``__builtin_elementwise_fshr``.
 
+- Added ``__builtin_elementwise_abs``.
+
 - Added ``__builtin_elementwise_minnumnum`` and ``__builtin_elementwise_maxnumnum``.
 
 - Trapping UBSan (e.g. ``-fsanitize-trap=undefined``) now emits a string describing the reason for
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 84206cf8b368b..604c9cddfe051 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1264,7 +1264,7 @@ def NondetermenisticValue : Builtin {
 
 def ElementwiseAbs : Builtin {
   let Spellings = ["__builtin_elementwise_abs"];
-  let Attributes = [NoThrow, Const, CustomTypeChecking];
+  let Attributes = [NoThrow, Const, CustomTypeChecking, Constexpr];
   let Prototype = "void(...)";
 }
 
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 307b77846969f..ebf82486698eb 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -1686,6 +1686,49 @@ static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
+                                            const InterpFrame *Frame,
+                                            const CallExpr *Call,
+                                            unsigned BuiltinID) {
+  // FIXME: Add support for floating point.
+  assert(!Call->getArg(0)->getType()->isFloatingType() &&
+         "floating point is currently not supported");
+
+  assert(Call->getNumArgs() == 1);
+  if (Call->getArg(0)->getType()->isIntegerType()) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+
+    pushInteger(S, Val.abs(), Call->getType());
+    return true;
+  }
+
+  // Otherwise, the argument must be a vector.
+  assert(Call->getArg(0)->getType()->isVectorType());
+  const Pointer &Arg = S.Stk.pop<Pointer>();
+  assert(Arg.getFieldDesc()->isPrimitiveArray());
+  const Pointer &Dst = S.Stk.peek<Pointer>();
+  assert(Dst.getFieldDesc()->isPrimitiveArray());
+  assert(Arg.getFieldDesc()->getNumElems() ==
+         Dst.getFieldDesc()->getNumElems());
+
+  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+  PrimType ElemT = *S.getContext().classify(ElemType);
+  unsigned NumElems = Arg.getNumElems();
+
+  // FIXME: Reading from uninitialized vector elements?
+  for (unsigned I = 0; I != NumElems; ++I) {
+    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+      Dst.elem<T>(I) = T::from(static_cast<T>(
+          APSInt(Arg.elem<T>(I).toAPSInt().abs(),
+                 ElemType->isUnsignedIntegerOrEnumerationType())));
+    });
+  }
+  Dst.initializeAllElements();
+
+  return true;
+}
+
 /// Can be called with an integer or vector as the first and only parameter.
 static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
@@ -2766,6 +2809,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
     return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call,
                                                 BuiltinID);
 
+  case Builtin::BI__builtin_elementwise_abs:
+    return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
+
   case Builtin::BI__builtin_memcpy:
   case Builtin::BImemcpy:
   case Builtin::BI__builtin_wmemcpy:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 36dd0f5d7a065..1c8d9706788c4 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11610,6 +11610,7 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
   switch (E->getBuiltinCallee()) {
   default:
     return false;
+  case Builtin::BI__builtin_elementwise_abs:
   case Builtin::BI__builtin_elementwise_popcount:
   case Builtin::BI__builtin_elementwise_bitreverse: {
     APValue Source;
@@ -11634,11 +11635,18 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
             APValue(APSInt(Elt.reverseBits(),
                            DestEltTy->isUnsignedIntegerOrEnumerationType())));
         break;
+      case Builtin::BI__builtin_elementwise_abs: {
+        APInt Val = Source.getVectorElt(EltNum).getInt().abs();
+        ResultElements.push_back(APValue(
+            APSInt(Val, DestEltTy->isUnsignedIntegerOrEnumerationType())));
+        break;
+      }
       }
     }
 
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
+
   case Builtin::BI__builtin_elementwise_add_sat:
   case Builtin::BI__builtin_elementwise_sub_sat:
   case clang::X86::BI__builtin_ia32_pmulhuw128:
@@ -13387,6 +13395,14 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
     return Success(Operand, E);
   }
 
+  case Builtin::BI__builtin_elementwise_abs: {
+    APSInt Val;
+    if (!EvaluateInteger(E->getArg(0), Val, Info))
+      return false;
+
+    return Success(Val.abs(), E);
+  }
+
   case Builtin::BI__builtin_expect:
   case Builtin::BI__builtin_expect_with_probability:
     return Visit(E->getArg(0));
diff --git a/clang/test/CodeGen/builtins-elementwise-math.c b/clang/test/CodeGen/builtins-elementwise-math.c
index bb5d0351db1a2..e37e5353603a0 100644
--- a/clang/test/CodeGen/builtins-elementwise-math.c
+++ b/clang/test/CodeGen/builtins-elementwise-math.c
@@ -66,7 +66,7 @@ void test_builtin_elementwise_abs(float f1, float f2, double d1, double d2,
   // CHECK-NEXT: call i32 @llvm.abs.i32(i32 [[IA1]], i1 false)
   b = __builtin_elementwise_abs(int_as_one);
 
-  // CHECK:   call i32 @llvm.abs.i32(i32 -10, i1 false)
+  // CHECK:   store i32 %elt.abs11, ptr @b, align 4
   b = __builtin_elementwise_abs(-10);
 
   // CHECK:      [[SI:%.+]] = load i16, ptr %si.addr, align 2
diff --git a/clang/test/Sema/constant-builtins-vector.cpp b/clang/test/Sema/constant-builtins-vector.cpp
index bc575dca98d77..85fb4930d09c0 100644
--- a/clang/test/Sema/constant-builtins-vector.cpp
+++ b/clang/test/Sema/constant-builtins-vector.cpp
@@ -876,3 +876,9 @@ static_assert(__builtin_elementwise_min(~0U, 0U) == 0U);
 static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_min((vector4char){1, -2, 3, -4}, (vector4char){4, -3, 2, -1})) == (LITTLE_END ? 0xFC02FD01 : 0x01FD02FC));
 static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_min((vector4uchar){1, 2, 3, 4}, (vector4uchar){4, 3, 2, 1})) == 0x01020201U);
 static_assert(__builtin_bit_cast(unsigned long long, __builtin_elementwise_min((vector4short){1, -2, 3, -4}, (vector4short){4, -3, 2, -1})) == (LITTLE_END ? 0xFFFC0002FFFD0001 : 0x0001FFFD0002FFFC));
+
+static_assert(__builtin_elementwise_abs(10) == 10);
+static_assert(__builtin_elementwise_abs(-10) == 10);
+static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_abs((vector4char){-1, -2, -3, 4})) == (LITTLE_END ? 0x04030201 : 0x01020304));
+// The absolute value of the most negative integer remains the most negative integer.
+static_assert(__builtin_elementwise_abs((int)(-2147483648)) == (int)(-2147483648));

>From b9b9e13be25ba03388b77d47df76364052470670 Mon Sep 17 00:00:00 2001
From: Vincent <llvm at viceroygroup.ca>
Date: Thu, 14 Aug 2025 00:34:44 +0800
Subject: [PATCH 2/2] Add floating-point support and more floating-point test cases

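The floating-point path makes cases like the following constant-evaluable
(a sketch; vector4float is an assumed typedef in the style of the existing
test helpers, e.g. float __attribute__((vector_size(16)))):

    static_assert(__builtin_elementwise_abs(-3.5) == 3.5);
    constexpr vector4float vf =
        __builtin_elementwise_abs((vector4float){-1.0f, 2.0f, -3.0f, 4.0f});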
---
 clang/lib/AST/ByteCode/InterpBuiltin.cpp     | 3292 +++++++++---------
 clang/lib/AST/ExprConstant.cpp               |   31 +-
 clang/test/Sema/constant-builtins-vector.cpp |   16 +-
 3 files changed, 1689 insertions(+), 1650 deletions(-)

diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index ebf82486698eb..b65af477b6e38 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -598,512 +598,515 @@ static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static inline Floating abs(InterpState &S, const Floating &In) {
+  if (!In.isNegative())
+    return In;
+
+  Floating Output = S.allocFloat(In.getSemantics());
+  APFloat New = In.getAPFloat();
+  New.changeSign();
+  Output.copy(New);
+  return Output;
+}
+
 // The C standard says "fabs raises no floating-point exceptions,
 // even if x is a signaling NaN. The returned value is independent of
 // the current rounding direction mode."  Therefore constant folding can
 // proceed without regard to the floating point settings.
 // Reference, WG14 N2478 F.10.4.3
 static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
-                                 const InterpFrame *Frame) {
-  const Floating &Val = S.Stk.pop<Floating>();
-  APFloat F = Val.getAPFloat();
-  if (!F.isNegative()) {
-    S.Stk.push<Floating>(Val);
+        const InterpFrame *Frame) {
+    const Floating &Val = S.Stk.pop<Floating>();
+    S.Stk.push<Floating>(abs(S, Val));
     return true;
-  }
-
-  Floating Result = S.allocFloat(Val.getSemantics());
-  F.changeSign();
-  Result.copy(F);
-  S.Stk.push<Floating>(Result);
-  return true;
 }
 
 static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
-                                const InterpFrame *Frame,
-                                const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  if (Val ==
-      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
-    return false;
-  if (Val.isNegative())
-    Val.negate();
-  pushInteger(S, Val, Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    if (Val ==
+            APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
+        return false;
+    if (Val.isNegative())
+        Val.negate();
+    pushInteger(S, Val, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
-                                     const InterpFrame *Frame,
-                                     const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  pushInteger(S, Val.popcount(), Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    pushInteger(S, Val.popcount(), Call->getType());
+    return true;
 }
 
 static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  pushInteger(S, Val.popcount() % 2, Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    pushInteger(S, Val.popcount() % 2, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
-                                  const InterpFrame *Frame,
-                                  const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
+    return true;
 }
 
 static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
-                                       const InterpFrame *Frame,
-                                       const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  pushInteger(S, Val.reverseBits(), Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    pushInteger(S, Val.reverseBits(), Call->getType());
+    return true;
 }
 
 static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
-                                          const InterpFrame *Frame,
-                                          const CallExpr *Call) {
-  // This is an unevaluated call, so there are no arguments on the stack.
-  assert(Call->getNumArgs() == 1);
-  const Expr *Arg = Call->getArg(0);
-
-  GCCTypeClass ResultClass =
-      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
-  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
-  pushInteger(S, ReturnVal, Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    // This is an unevaluated call, so there are no arguments on the stack.
+    assert(Call->getNumArgs() == 1);
+    const Expr *Arg = Call->getArg(0);
+
+    GCCTypeClass ResultClass =
+        EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
+    int32_t ReturnVal = static_cast<int32_t>(ResultClass);
+    pushInteger(S, ReturnVal, Call->getType());
+    return true;
 }
 
 // __builtin_expect(long, long)
 // __builtin_expect_with_probability(long, long, double)
 static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call) {
-  // The return value is simply the value of the first parameter.
-  // We ignore the probability.
-  unsigned NumArgs = Call->getNumArgs();
-  assert(NumArgs == 2 || NumArgs == 3);
-
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  if (NumArgs == 3)
-    S.Stk.discard<Floating>();
-  discard(S.Stk, ArgT);
-
-  APSInt Val = popToAPSInt(S.Stk, ArgT);
-  pushInteger(S, Val, Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    // The return value is simply the value of the first parameter.
+    // We ignore the probability.
+    unsigned NumArgs = Call->getNumArgs();
+    assert(NumArgs == 2 || NumArgs == 3);
+
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    if (NumArgs == 3)
+        S.Stk.discard<Floating>();
+    discard(S.Stk, ArgT);
+
+    APSInt Val = popToAPSInt(S.Stk, ArgT);
+    pushInteger(S, Val, Call->getType());
+    return true;
 }
 
 /// rotateleft(value, amount)
 static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call, bool Right) {
-  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
-  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
-
-  APSInt Amount = popToAPSInt(S.Stk, AmountT);
-  APSInt Value = popToAPSInt(S.Stk, ValueT);
+        const InterpFrame *Frame,
+        const CallExpr *Call, bool Right) {
+    PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
+    PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
 
-  APSInt Result;
-  if (Right)
-    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
-                    /*IsUnsigned=*/true);
-  else // Left.
-    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
-                    /*IsUnsigned=*/true);
+    APSInt Amount = popToAPSInt(S.Stk, AmountT);
+    APSInt Value = popToAPSInt(S.Stk, ValueT);
 
-  pushInteger(S, Result, Call->getType());
-  return true;
+    APSInt Result;
+    if (Right)
+        Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
+                /*IsUnsigned=*/true);
+    else // Left.
+        Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
+                /*IsUnsigned=*/true);
+
+    pushInteger(S, Result, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
-                                const InterpFrame *Frame,
-                                const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Value = popToAPSInt(S.Stk, ArgT);
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Value = popToAPSInt(S.Stk, ArgT);
 
-  uint64_t N = Value.countr_zero();
-  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
-  return true;
+    uint64_t N = Value.countr_zero();
+    pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
-                                      const InterpFrame *Frame,
-                                      const CallExpr *Call) {
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
 #ifndef NDEBUG
-  assert(Call->getArg(0)->isLValue());
-  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
-  assert(PtrT == PT_Ptr &&
-         "Unsupported pointer type passed to __builtin_addressof()");
+    assert(Call->getArg(0)->isLValue());
+    PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
+    assert(PtrT == PT_Ptr &&
+            "Unsupported pointer type passed to __builtin_addressof()");
 #endif
-  return true;
+    return true;
 }
 
 static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
-                                 const InterpFrame *Frame,
-                                 const CallExpr *Call) {
-  return Call->getDirectCallee()->isConstexpr();
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    return Call->getDirectCallee()->isConstexpr();
 }
 
 static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
-                                                 const InterpFrame *Frame,
-                                                 const CallExpr *Call) {
-  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt Arg = popToAPSInt(S.Stk, ArgT);
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt Arg = popToAPSInt(S.Stk, ArgT);
 
-  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
-      Arg.getZExtValue());
-  pushInteger(S, Result, Call->getType());
-  return true;
+    int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
+            Arg.getZExtValue());
+    pushInteger(S, Result, Call->getType());
+    return true;
 }
 
 // Two integral values followed by a pointer (lhs, rhs, resultOut)
 static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
-                                       const CallExpr *Call,
-                                       unsigned BuiltinOp) {
-  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
-  if (ResultPtr.isDummy())
-    return false;
+        const CallExpr *Call,
+        unsigned BuiltinOp) {
+    const Pointer &ResultPtr = S.Stk.pop<Pointer>();
+    if (ResultPtr.isDummy())
+        return false;
 
-  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
-  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
-  APSInt RHS = popToAPSInt(S.Stk, RHST);
-  APSInt LHS = popToAPSInt(S.Stk, LHST);
-  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
-  PrimType ResultT = *S.getContext().classify(ResultType);
-  bool Overflow;
-
-  APSInt Result;
-  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
-      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
-      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
-    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
-                    ResultType->isSignedIntegerOrEnumerationType();
-    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
-                     ResultType->isSignedIntegerOrEnumerationType();
-    uint64_t LHSSize = LHS.getBitWidth();
-    uint64_t RHSSize = RHS.getBitWidth();
-    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
-    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
-
-    // Add an additional bit if the signedness isn't uniformly agreed to. We
-    // could do this ONLY if there is a signed and an unsigned that both have
-    // MaxBits, but the code to check that is pretty nasty.  The issue will be
-    // caught in the shrink-to-result later anyway.
-    if (IsSigned && !AllSigned)
-      ++MaxBits;
-
-    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
-    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
-    Result = APSInt(MaxBits, !IsSigned);
-  }
+    PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+    PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+    APSInt RHS = popToAPSInt(S.Stk, RHST);
+    APSInt LHS = popToAPSInt(S.Stk, LHST);
+    QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
+    PrimType ResultT = *S.getContext().classify(ResultType);
+    bool Overflow;
 
-  // Find largest int.
-  switch (BuiltinOp) {
-  default:
-    llvm_unreachable("Invalid value for BuiltinOp");
-  case Builtin::BI__builtin_add_overflow:
-  case Builtin::BI__builtin_sadd_overflow:
-  case Builtin::BI__builtin_saddl_overflow:
-  case Builtin::BI__builtin_saddll_overflow:
-  case Builtin::BI__builtin_uadd_overflow:
-  case Builtin::BI__builtin_uaddl_overflow:
-  case Builtin::BI__builtin_uaddll_overflow:
-    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
-                            : LHS.uadd_ov(RHS, Overflow);
-    break;
-  case Builtin::BI__builtin_sub_overflow:
-  case Builtin::BI__builtin_ssub_overflow:
-  case Builtin::BI__builtin_ssubl_overflow:
-  case Builtin::BI__builtin_ssubll_overflow:
-  case Builtin::BI__builtin_usub_overflow:
-  case Builtin::BI__builtin_usubl_overflow:
-  case Builtin::BI__builtin_usubll_overflow:
-    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
-                            : LHS.usub_ov(RHS, Overflow);
-    break;
-  case Builtin::BI__builtin_mul_overflow:
-  case Builtin::BI__builtin_smul_overflow:
-  case Builtin::BI__builtin_smull_overflow:
-  case Builtin::BI__builtin_smulll_overflow:
-  case Builtin::BI__builtin_umul_overflow:
-  case Builtin::BI__builtin_umull_overflow:
-  case Builtin::BI__builtin_umulll_overflow:
-    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
-                            : LHS.umul_ov(RHS, Overflow);
-    break;
-  }
+    APSInt Result;
+    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+            BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+            BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+        bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
+            ResultType->isSignedIntegerOrEnumerationType();
+        bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
+            ResultType->isSignedIntegerOrEnumerationType();
+        uint64_t LHSSize = LHS.getBitWidth();
+        uint64_t RHSSize = RHS.getBitWidth();
+        uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
+        uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
+
+        // Add an additional bit if the signedness isn't uniformly agreed to. We
+        // could do this ONLY if there is a signed and an unsigned that both have
+        // MaxBits, but the code to check that is pretty nasty.  The issue will be
+        // caught in the shrink-to-result later anyway.
+        if (IsSigned && !AllSigned)
+            ++MaxBits;
+
+        LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
+        RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
+        Result = APSInt(MaxBits, !IsSigned);
+    }
 
-  // In the case where multiple sizes are allowed, truncate and see if
-  // the values are the same.
-  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
-      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
-      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
-    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
-    // since it will give us the behavior of a TruncOrSelf in the case where
-    // its parameter <= its size.  We previously set Result to be at least the
-    // type-size of the result, so getTypeSize(ResultType) <= Resu
-    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
-    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
-
-    if (!APSInt::isSameValue(Temp, Result))
-      Overflow = true;
-    Result = std::move(Temp);
-  }
+    // Find largest int.
+    switch (BuiltinOp) {
+        default:
+            llvm_unreachable("Invalid value for BuiltinOp");
+        case Builtin::BI__builtin_add_overflow:
+        case Builtin::BI__builtin_sadd_overflow:
+        case Builtin::BI__builtin_saddl_overflow:
+        case Builtin::BI__builtin_saddll_overflow:
+        case Builtin::BI__builtin_uadd_overflow:
+        case Builtin::BI__builtin_uaddl_overflow:
+        case Builtin::BI__builtin_uaddll_overflow:
+            Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
+                : LHS.uadd_ov(RHS, Overflow);
+            break;
+        case Builtin::BI__builtin_sub_overflow:
+        case Builtin::BI__builtin_ssub_overflow:
+        case Builtin::BI__builtin_ssubl_overflow:
+        case Builtin::BI__builtin_ssubll_overflow:
+        case Builtin::BI__builtin_usub_overflow:
+        case Builtin::BI__builtin_usubl_overflow:
+        case Builtin::BI__builtin_usubll_overflow:
+            Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
+                : LHS.usub_ov(RHS, Overflow);
+            break;
+        case Builtin::BI__builtin_mul_overflow:
+        case Builtin::BI__builtin_smul_overflow:
+        case Builtin::BI__builtin_smull_overflow:
+        case Builtin::BI__builtin_smulll_overflow:
+        case Builtin::BI__builtin_umul_overflow:
+        case Builtin::BI__builtin_umull_overflow:
+        case Builtin::BI__builtin_umulll_overflow:
+            Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
+                : LHS.umul_ov(RHS, Overflow);
+            break;
+    }
+
+    // In the case where multiple sizes are allowed, truncate and see if
+    // the values are the same.
+    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+            BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+            BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+        // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
+        // since it will give us the behavior of a TruncOrSelf in the case where
+        // its parameter <= its size.  We previously set Result to be at least the
+        // type-size of the result, so getTypeSize(ResultType) <= Resu
+        APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
+        Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
+
+        if (!APSInt::isSameValue(Temp, Result))
+            Overflow = true;
+        Result = std::move(Temp);
+    }
 
-  // Write Result to ResultPtr and put Overflow on the stack.
-  assignInteger(S, ResultPtr, ResultT, Result);
-  if (ResultPtr.canBeInitialized())
-    ResultPtr.initialize();
+    // Write Result to ResultPtr and put Overflow on the stack.
+    assignInteger(S, ResultPtr, ResultT, Result);
+    if (ResultPtr.canBeInitialized())
+        ResultPtr.initialize();
 
-  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
-  S.Stk.push<Boolean>(Overflow);
-  return true;
+    assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
+    S.Stk.push<Boolean>(Overflow);
+    return true;
 }
 
 /// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
 static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
-                                    const InterpFrame *Frame,
-                                    const CallExpr *Call, unsigned BuiltinOp) {
-  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
-  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
-  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
-  APSInt CarryIn = popToAPSInt(S.Stk, LHST);
-  APSInt RHS = popToAPSInt(S.Stk, RHST);
-  APSInt LHS = popToAPSInt(S.Stk, LHST);
-
-  APSInt CarryOut;
-
-  APSInt Result;
-  // Copy the number of bits and sign.
-  Result = LHS;
-  CarryOut = LHS;
-
-  bool FirstOverflowed = false;
-  bool SecondOverflowed = false;
-  switch (BuiltinOp) {
-  default:
-    llvm_unreachable("Invalid value for BuiltinOp");
-  case Builtin::BI__builtin_addcb:
-  case Builtin::BI__builtin_addcs:
-  case Builtin::BI__builtin_addc:
-  case Builtin::BI__builtin_addcl:
-  case Builtin::BI__builtin_addcll:
-    Result =
-        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
-    break;
-  case Builtin::BI__builtin_subcb:
-  case Builtin::BI__builtin_subcs:
-  case Builtin::BI__builtin_subc:
-  case Builtin::BI__builtin_subcl:
-  case Builtin::BI__builtin_subcll:
-    Result =
-        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
-    break;
-  }
-  // It is possible for both overflows to happen but CGBuiltin uses an OR so
-  // this is consistent.
-  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
+        const InterpFrame *Frame,
+        const CallExpr *Call, unsigned BuiltinOp) {
+    const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
+    PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+    PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+    APSInt CarryIn = popToAPSInt(S.Stk, LHST);
+    APSInt RHS = popToAPSInt(S.Stk, RHST);
+    APSInt LHS = popToAPSInt(S.Stk, LHST);
 
-  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
-  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
-  assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
-  CarryOutPtr.initialize();
+    APSInt CarryOut;
 
-  assert(Call->getType() == Call->getArg(0)->getType());
-  pushInteger(S, Result, Call->getType());
-  return true;
+    APSInt Result;
+    // Copy the number of bits and sign.
+    Result = LHS;
+    CarryOut = LHS;
+
+    bool FirstOverflowed = false;
+    bool SecondOverflowed = false;
+    switch (BuiltinOp) {
+        default:
+            llvm_unreachable("Invalid value for BuiltinOp");
+        case Builtin::BI__builtin_addcb:
+        case Builtin::BI__builtin_addcs:
+        case Builtin::BI__builtin_addc:
+        case Builtin::BI__builtin_addcl:
+        case Builtin::BI__builtin_addcll:
+            Result =
+                LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
+            break;
+        case Builtin::BI__builtin_subcb:
+        case Builtin::BI__builtin_subcs:
+        case Builtin::BI__builtin_subc:
+        case Builtin::BI__builtin_subcl:
+        case Builtin::BI__builtin_subcll:
+            Result =
+                LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
+            break;
+    }
+    // It is possible for both overflows to happen but CGBuiltin uses an OR so
+    // this is consistent.
+    CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
+
+    QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
+    PrimType CarryOutT = *S.getContext().classify(CarryOutType);
+    assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
+    CarryOutPtr.initialize();
+
+    assert(Call->getType() == Call->getArg(0)->getType());
+    pushInteger(S, Result, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
-                                const InterpFrame *Frame, const CallExpr *Call,
-                                unsigned BuiltinOp) {
+        const InterpFrame *Frame, const CallExpr *Call,
+        unsigned BuiltinOp) {
 
-  std::optional<APSInt> Fallback;
-  if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
-    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
-    Fallback = popToAPSInt(S.Stk, FallbackT);
-  }
-  PrimType ValT = *S.getContext().classify(Call->getArg(0));
-  const APSInt &Val = popToAPSInt(S.Stk, ValT);
-
-  // When the argument is 0, the result of GCC builtins is undefined, whereas
-  // for Microsoft intrinsics, the result is the bit-width of the argument.
-  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
-                         BuiltinOp != Builtin::BI__lzcnt &&
-                         BuiltinOp != Builtin::BI__lzcnt64;
-
-  if (Val == 0) {
-    if (Fallback) {
-      pushInteger(S, *Fallback, Call->getType());
-      return true;
+    std::optional<APSInt> Fallback;
+    if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
+        PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
+        Fallback = popToAPSInt(S.Stk, FallbackT);
     }
+    PrimType ValT = *S.getContext().classify(Call->getArg(0));
+    const APSInt &Val = popToAPSInt(S.Stk, ValT);
+
+    // When the argument is 0, the result of GCC builtins is undefined, whereas
+    // for Microsoft intrinsics, the result is the bit-width of the argument.
+    bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
+        BuiltinOp != Builtin::BI__lzcnt &&
+        BuiltinOp != Builtin::BI__lzcnt64;
+
+    if (Val == 0) {
+        if (Fallback) {
+            pushInteger(S, *Fallback, Call->getType());
+            return true;
+        }
 
-    if (ZeroIsUndefined)
-      return false;
-  }
+        if (ZeroIsUndefined)
+            return false;
+    }
 
-  pushInteger(S, Val.countl_zero(), Call->getType());
-  return true;
+    pushInteger(S, Val.countl_zero(), Call->getType());
+    return true;
 }
 
 static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
-                                const InterpFrame *Frame, const CallExpr *Call,
-                                unsigned BuiltinID) {
-  std::optional<APSInt> Fallback;
-  if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
-    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
-    Fallback = popToAPSInt(S.Stk, FallbackT);
-  }
-  PrimType ValT = *S.getContext().classify(Call->getArg(0));
-  const APSInt &Val = popToAPSInt(S.Stk, ValT);
+        const InterpFrame *Frame, const CallExpr *Call,
+        unsigned BuiltinID) {
+    std::optional<APSInt> Fallback;
+    if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
+        PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
+        Fallback = popToAPSInt(S.Stk, FallbackT);
+    }
+    PrimType ValT = *S.getContext().classify(Call->getArg(0));
+    const APSInt &Val = popToAPSInt(S.Stk, ValT);
 
-  if (Val == 0) {
-    if (Fallback) {
-      pushInteger(S, *Fallback, Call->getType());
-      return true;
+    if (Val == 0) {
+        if (Fallback) {
+            pushInteger(S, *Fallback, Call->getType());
+            return true;
+        }
+        return false;
     }
-    return false;
-  }
 
-  pushInteger(S, Val.countr_zero(), Call->getType());
-  return true;
+    pushInteger(S, Val.countr_zero(), Call->getType());
+    return true;
 }
 
 static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
-                                  const InterpFrame *Frame,
-                                  const CallExpr *Call) {
-  PrimType ReturnT = *S.getContext().classify(Call->getType());
-  PrimType ValT = *S.getContext().classify(Call->getArg(0));
-  const APSInt &Val = popToAPSInt(S.Stk, ValT);
-  assert(Val.getActiveBits() <= 64);
-
-  INT_TYPE_SWITCH(ReturnT,
-                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ReturnT = *S.getContext().classify(Call->getType());
+    PrimType ValT = *S.getContext().classify(Call->getArg(0));
+    const APSInt &Val = popToAPSInt(S.Stk, ValT);
+    assert(Val.getActiveBits() <= 64);
+
+    INT_TYPE_SWITCH(ReturnT,
+            { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
+    return true;
 }
 
 /// bool __atomic_always_lock_free(size_t, void const volatile*)
 /// bool __atomic_is_lock_free(size_t, void const volatile*)
 static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
-                                             const InterpFrame *Frame,
-                                             const CallExpr *Call,
-                                             unsigned BuiltinOp) {
-  auto returnBool = [&S](bool Value) -> bool {
-    S.Stk.push<Boolean>(Value);
-    return true;
-  };
-
-  PrimType ValT = *S.getContext().classify(Call->getArg(0));
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
-  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
-
-  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
-  // of two less than or equal to the maximum inline atomic width, we know it
-  // is lock-free.  If the size isn't a power of two, or greater than the
-  // maximum alignment where we promote atomics, we know it is not lock-free
-  // (at least not in the sense of atomic_is_lock_free).  Otherwise,
-  // the answer can only be determined at runtime; for example, 16-byte
-  // atomics have lock-free implementations on some, but not all,
-  // x86-64 processors.
-
-  // Check power-of-two.
-  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
-  if (Size.isPowerOfTwo()) {
-    // Check against inlining width.
-    unsigned InlineWidthBits =
-        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
-    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
-
-      // OK, we will inline appropriately-aligned operations of this size,
-      // and _Atomic(T) is appropriately-aligned.
-      if (Size == CharUnits::One())
-        return returnBool(true);
-
-      // Same for null pointers.
-      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
-      if (Ptr.isZero())
-        return returnBool(true);
-
-      if (Ptr.isIntegralPointer()) {
-        uint64_t IntVal = Ptr.getIntegerRepresentation();
-        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
-          return returnBool(true);
-      }
-
-      const Expr *PtrArg = Call->getArg(1);
-      // Otherwise, check if the type's alignment against Size.
-      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
-        // Drop the potential implicit-cast to 'const volatile void*', getting
-        // the underlying type.
-        if (ICE->getCastKind() == CK_BitCast)
-          PtrArg = ICE->getSubExpr();
-      }
+        const InterpFrame *Frame,
+        const CallExpr *Call,
+        unsigned BuiltinOp) {
+    auto returnBool = [&S](bool Value) -> bool {
+        S.Stk.push<Boolean>(Value);
+        return true;
+    };
 
-      if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
-        QualType PointeeType = PtrTy->getPointeeType();
-        if (!PointeeType->isIncompleteType() &&
-            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
-          // OK, we will inline operations on this object.
-          return returnBool(true);
+    PrimType ValT = *S.getContext().classify(Call->getArg(0));
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
+    const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
+
+    // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+    // of two less than or equal to the maximum inline atomic width, we know it
+    // is lock-free.  If the size isn't a power of two, or greater than the
+    // maximum alignment where we promote atomics, we know it is not lock-free
+    // (at least not in the sense of atomic_is_lock_free).  Otherwise,
+    // the answer can only be determined at runtime; for example, 16-byte
+    // atomics have lock-free implementations on some, but not all,
+    // x86-64 processors.
+
+    // Check power-of-two.
+    CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+    if (Size.isPowerOfTwo()) {
+        // Check against inlining width.
+        unsigned InlineWidthBits =
+            S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
+        if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
+
+            // OK, we will inline appropriately-aligned operations of this size,
+            // and _Atomic(T) is appropriately-aligned.
+            if (Size == CharUnits::One())
+                return returnBool(true);
+
+            // Same for null pointers.
+            assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
+            if (Ptr.isZero())
+                return returnBool(true);
+
+            if (Ptr.isIntegralPointer()) {
+                uint64_t IntVal = Ptr.getIntegerRepresentation();
+                if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
+                    return returnBool(true);
+            }
+
+            const Expr *PtrArg = Call->getArg(1);
+            // Otherwise, check if the type's alignment against Size.
+            if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
+                // Drop the potential implicit-cast to 'const volatile void*', getting
+                // the underlying type.
+                if (ICE->getCastKind() == CK_BitCast)
+                    PtrArg = ICE->getSubExpr();
+            }
+
+            if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
+                QualType PointeeType = PtrTy->getPointeeType();
+                if (!PointeeType->isIncompleteType() &&
+                        S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
+                    // OK, we will inline operations on this object.
+                    return returnBool(true);
+                }
+            }
         }
-      }
     }
-  }
 
-  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
-    return returnBool(false);
+    if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
+        return returnBool(false);
 
-  return false;
+    return false;
 }
 
 /// bool __c11_atomic_is_lock_free(size_t)
 static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
-                                                    CodePtr OpPC,
-                                                    const InterpFrame *Frame,
-                                                    const CallExpr *Call) {
-  PrimType ValT = *S.getContext().classify(Call->getArg(0));
-  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
-
-  auto returnBool = [&S](bool Value) -> bool {
-    S.Stk.push<Boolean>(Value);
-    return true;
-  };
-
-  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
-  if (Size.isPowerOfTwo()) {
-    // Check against inlining width.
-    unsigned InlineWidthBits =
-        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
-    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits))
-      return returnBool(true);
-  }
+        CodePtr OpPC,
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType ValT = *S.getContext().classify(Call->getArg(0));
+    const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);
+
+    auto returnBool = [&S](bool Value) -> bool {
+        S.Stk.push<Boolean>(Value);
+        return true;
+    };
+
+    CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+    if (Size.isPowerOfTwo()) {
+        // Check against inlining width.
+        unsigned InlineWidthBits =
+            S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
+        if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits))
+            return returnBool(true);
+    }
 
-  return false; // returnBool(false);
+    return false; // returnBool(false);
 }
 
 /// __builtin_complex(Float A, float B);
 static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
-                                    const InterpFrame *Frame,
-                                    const CallExpr *Call) {
-  const Floating &Arg2 = S.Stk.pop<Floating>();
-  const Floating &Arg1 = S.Stk.pop<Floating>();
-  Pointer &Result = S.Stk.peek<Pointer>();
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    const Floating &Arg2 = S.Stk.pop<Floating>();
+    const Floating &Arg1 = S.Stk.pop<Floating>();
+    Pointer &Result = S.Stk.peek<Pointer>();
 
-  Result.elem<Floating>(0) = Arg1;
-  Result.elem<Floating>(1) = Arg2;
-  Result.initializeAllElements();
+    Result.elem<Floating>(0) = Arg1;
+    Result.elem<Floating>(1) = Arg2;
+    Result.initializeAllElements();
 
-  return true;
+    return true;
 }
 
 /// __builtin_is_aligned()
@@ -1112,1474 +1115,1481 @@ static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
 /// The first parameter is either an integer or a pointer.
 /// The second parameter is the requested alignment as an integer.
 static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
-                                               const InterpFrame *Frame,
-                                               const CallExpr *Call,
-                                               unsigned BuiltinOp) {
-  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
-  const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT);
-
-  if (Alignment < 0 || !Alignment.isPowerOf2()) {
-    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
-    return false;
-  }
-  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
-  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
-  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
-    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
-        << MaxValue << Call->getArg(0)->getType() << Alignment;
-    return false;
-  }
+        const InterpFrame *Frame,
+        const CallExpr *Call,
+        unsigned BuiltinOp) {
+    PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
+    const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT);
+
+    if (Alignment < 0 || !Alignment.isPowerOf2()) {
+        S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
+        return false;
+    }
+    unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
+    APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
+    if (APSInt::compareValues(Alignment, MaxValue) > 0) {
+        S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
+            << MaxValue << Call->getArg(0)->getType() << Alignment;
+        return false;
+    }
 
-  // The first parameter is either an integer or a pointer (but not a function
-  // pointer).
-  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
-
-  if (isIntegralType(FirstArgT)) {
-    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
-    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
-    if (BuiltinOp == Builtin::BI__builtin_align_up) {
-      APSInt AlignedVal =
-          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
-      pushInteger(S, AlignedVal, Call->getType());
-    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
-      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
-      pushInteger(S, AlignedVal, Call->getType());
-    } else {
-      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
-      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
+    // The first parameter is either an integer or a pointer (but not a function
+    // pointer).
+    PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
+
+    if (isIntegralType(FirstArgT)) {
+        const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
+        APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
+        if (BuiltinOp == Builtin::BI__builtin_align_up) {
+            APSInt AlignedVal =
+                APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
+            pushInteger(S, AlignedVal, Call->getType());
+        } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
+            APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
+            pushInteger(S, AlignedVal, Call->getType());
+        } else {
+            assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
+            S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
+        }
+        return true;
     }
-    return true;
-  }
 
-  assert(FirstArgT == PT_Ptr);
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
+    assert(FirstArgT == PT_Ptr);
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  unsigned PtrOffset = Ptr.getByteOffset();
-  PtrOffset = Ptr.getIndex();
-  CharUnits BaseAlignment =
-      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
-  CharUnits PtrAlign =
-      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
+    unsigned PtrOffset = Ptr.getByteOffset();
+    PtrOffset = Ptr.getIndex();
+    CharUnits BaseAlignment =
+        S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
+    CharUnits PtrAlign =
+        BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
+
+    if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
+        if (PtrAlign.getQuantity() >= Alignment) {
+            S.Stk.push<Boolean>(true);
+            return true;
+        }
+        // If the alignment is not known to be sufficient, some cases could still
+        // be aligned at run time. However, if the requested alignment is less or
+        // equal to the base alignment and the offset is not aligned, we know that
+        // the run-time value can never be aligned.
+        if (BaseAlignment.getQuantity() >= Alignment &&
+                PtrAlign.getQuantity() < Alignment) {
+            S.Stk.push<Boolean>(false);
+            return true;
+        }
 
-  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
-    if (PtrAlign.getQuantity() >= Alignment) {
-      S.Stk.push<Boolean>(true);
-      return true;
-    }
-    // If the alignment is not known to be sufficient, some cases could still
-    // be aligned at run time. However, if the requested alignment is less or
-    // equal to the base alignment and the offset is not aligned, we know that
-    // the run-time value can never be aligned.
-    if (BaseAlignment.getQuantity() >= Alignment &&
-        PtrAlign.getQuantity() < Alignment) {
-      S.Stk.push<Boolean>(false);
-      return true;
+        S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
+            << Alignment;
+        return false;
     }
 
-    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
-        << Alignment;
-    return false;
-  }
+    assert(BuiltinOp == Builtin::BI__builtin_align_down ||
+            BuiltinOp == Builtin::BI__builtin_align_up);
 
-  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
-         BuiltinOp == Builtin::BI__builtin_align_up);
-
-  // For align_up/align_down, we can return the same value if the alignment
-  // is known to be greater or equal to the requested value.
-  if (PtrAlign.getQuantity() >= Alignment) {
-    S.Stk.push<Pointer>(Ptr);
-    return true;
-  }
+    // For align_up/align_down, we can return the same value if the alignment
+    // is known to be greater or equal to the requested value.
+    if (PtrAlign.getQuantity() >= Alignment) {
+        S.Stk.push<Pointer>(Ptr);
+        return true;
+    }
 
-  // The alignment could be greater than the minimum at run-time, so we cannot
-  // infer much about the resulting pointer value. One case is possible:
-  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
-  // can infer the correct index if the requested alignment is smaller than
-  // the base alignment so we can perform the computation on the offset.
-  if (BaseAlignment.getQuantity() >= Alignment) {
-    assert(Alignment.getBitWidth() <= 64 &&
-           "Cannot handle > 64-bit address-space");
-    uint64_t Alignment64 = Alignment.getZExtValue();
-    CharUnits NewOffset =
-        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
-                                    ? llvm::alignDown(PtrOffset, Alignment64)
-                                    : llvm::alignTo(PtrOffset, Alignment64));
-
-    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
-    return true;
-  }
+    // The alignment could be greater than the minimum at run-time, so we cannot
+    // infer much about the resulting pointer value. One case is possible:
+    // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
+    // can infer the correct index if the requested alignment is smaller than
+    // the base alignment so we can perform the computation on the offset.
+    if (BaseAlignment.getQuantity() >= Alignment) {
+        assert(Alignment.getBitWidth() <= 64 &&
+                "Cannot handle > 64-bit address-space");
+        uint64_t Alignment64 = Alignment.getZExtValue();
+        CharUnits NewOffset =
+            CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
+                    ? llvm::alignDown(PtrOffset, Alignment64)
+                    : llvm::alignTo(PtrOffset, Alignment64));
+
+        S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
+        return true;
+    }
 
-  // Otherwise, we cannot constant-evaluate the result.
-  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
-  return false;
+    // Otherwise, we cannot constant-evaluate the result.
+    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
+    return false;
 }
 
 /// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
 static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
-                                           const InterpFrame *Frame,
-                                           const CallExpr *Call) {
-  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
-
-  std::optional<APSInt> ExtraOffset;
-  if (Call->getNumArgs() == 3)
-    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
-
-  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
-
-  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
-
-  // If there is a base object, then it must have the correct alignment.
-  if (Ptr.isBlockPointer()) {
-    CharUnits BaseAlignment;
-    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
-      BaseAlignment = S.getASTContext().getDeclAlign(VD);
-    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
-      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
-
-    if (BaseAlignment < Align) {
-      S.CCEDiag(Call->getArg(0),
-                diag::note_constexpr_baa_insufficient_alignment)
-          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
-      return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
+
+    std::optional<APSInt> ExtraOffset;
+    if (Call->getNumArgs() == 3)
+        ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
+
+    APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+    CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
+
+    // If there is a base object, then it must have the correct alignment.
+    if (Ptr.isBlockPointer()) {
+        CharUnits BaseAlignment;
+        if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
+            BaseAlignment = S.getASTContext().getDeclAlign(VD);
+        else if (const auto *E = Ptr.getDeclDesc()->asExpr())
+            BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
+
+        if (BaseAlignment < Align) {
+            S.CCEDiag(Call->getArg(0),
+                    diag::note_constexpr_baa_insufficient_alignment)
+                << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
+            return false;
+        }
     }
-  }
 
-  APValue AV = Ptr.toAPValue(S.getASTContext());
-  CharUnits AVOffset = AV.getLValueOffset();
-  if (ExtraOffset)
-    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
-  if (AVOffset.alignTo(Align) != AVOffset) {
-    if (Ptr.isBlockPointer())
-      S.CCEDiag(Call->getArg(0),
-                diag::note_constexpr_baa_insufficient_alignment)
-          << 1 << AVOffset.getQuantity() << Align.getQuantity();
-    else
-      S.CCEDiag(Call->getArg(0),
-                diag::note_constexpr_baa_value_insufficient_alignment)
-          << AVOffset.getQuantity() << Align.getQuantity();
-    return false;
-  }
+    APValue AV = Ptr.toAPValue(S.getASTContext());
+    CharUnits AVOffset = AV.getLValueOffset();
+    if (ExtraOffset)
+        AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
+    if (AVOffset.alignTo(Align) != AVOffset) {
+        if (Ptr.isBlockPointer())
+            S.CCEDiag(Call->getArg(0),
+                    diag::note_constexpr_baa_insufficient_alignment)
+                << 1 << AVOffset.getQuantity() << Align.getQuantity();
+        else
+            S.CCEDiag(Call->getArg(0),
+                    diag::note_constexpr_baa_value_insufficient_alignment)
+                << AVOffset.getQuantity() << Align.getQuantity();
+        return false;
+    }
 
-  S.Stk.push<Pointer>(Ptr);
-  return true;
+    S.Stk.push<Pointer>(Ptr);
+    return true;
 }
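
A rough constexpr sketch of the successful path, for illustration only (not a test from this patch):

  alignas(16) constexpr int arr[4] = {1, 2, 3, 4};
  // The builtin yields the pointer unchanged once the alignment check passes.
  static_assert(__builtin_assume_aligned(arr, 16) == arr);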
 
 static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
-                                       const InterpFrame *Frame,
-                                       const CallExpr *Call) {
-  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
-      !Call->getArg(1)->getType()->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
+            !Call->getArg(1)->getType()->isIntegerType())
+        return false;
 
-  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
-  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
-  APSInt Index = popToAPSInt(S.Stk, IndexT);
-  APSInt Val = popToAPSInt(S.Stk, ValT);
+    PrimType ValT = *S.Ctx.classify(Call->getArg(0));
+    PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
+    APSInt Index = popToAPSInt(S.Stk, IndexT);
+    APSInt Val = popToAPSInt(S.Stk, ValT);
 
-  unsigned BitWidth = Val.getBitWidth();
-  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
-  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
-  Length = Length > BitWidth ? BitWidth : Length;
+    unsigned BitWidth = Val.getBitWidth();
+    uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
+    uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
+    Length = Length > BitWidth ? BitWidth : Length;
 
-  // Handle out of bounds cases.
-  if (Length == 0 || Shift >= BitWidth) {
-    pushInteger(S, 0, Call->getType());
-    return true;
-  }
+    // Handle out of bounds cases.
+    if (Length == 0 || Shift >= BitWidth) {
+        pushInteger(S, 0, Call->getType());
+        return true;
+    }
 
-  uint64_t Result = Val.getZExtValue() >> Shift;
-  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
-  pushInteger(S, Result, Call->getType());
-  return true;
+    uint64_t Result = Val.getZExtValue() >> Shift;
+    Result &= llvm::maskTrailingOnes<uint64_t>(Length);
+    pushInteger(S, Result, Call->getType());
+    return true;
 }
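
A sketch of the extraction this handler folds, assuming the bextr builtin is constexpr-enabled as the handler suggests and BMI is available; the control word packs the start bit in bits [7:0] and the field length in bits [15:8]:

  // start = 4, length = 4: extract bits [7:4] of 0xF0 -> 0xF.
  static_assert(__builtin_ia32_bextr_u32(0xF0u, 0x0404u) == 0xFu);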
 
 static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
-                                      const InterpFrame *Frame,
-                                      const CallExpr *Call) {
-  QualType CallType = Call->getType();
-  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
-      !Call->getArg(1)->getType()->isIntegerType() ||
-      !CallType->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    QualType CallType = Call->getType();
+    if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
+            !Call->getArg(1)->getType()->isIntegerType() ||
+            !CallType->isIntegerType())
+        return false;
 
-  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
-  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
+    PrimType ValT = *S.Ctx.classify(Call->getArg(0));
+    PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
 
-  APSInt Idx = popToAPSInt(S.Stk, IndexT);
-  APSInt Val = popToAPSInt(S.Stk, ValT);
+    APSInt Idx = popToAPSInt(S.Stk, IndexT);
+    APSInt Val = popToAPSInt(S.Stk, ValT);
 
-  unsigned BitWidth = Val.getBitWidth();
-  uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
+    unsigned BitWidth = Val.getBitWidth();
+    uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
 
-  if (Index < BitWidth)
-    Val.clearHighBits(BitWidth - Index);
+    if (Index < BitWidth)
+        Val.clearHighBits(BitWidth - Index);
 
-  pushInteger(S, Val, CallType);
-  return true;
+    pushInteger(S, Val, CallType);
+    return true;
 }
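
Likewise for bzhi, which clears the bits at and above the given index (sketch, assuming BMI2 and constexpr support):

  // Keep the low 8 bits, clear bits [31:8].
  static_assert(__builtin_ia32_bzhi_si(0xFFFFFFFFu, 8) == 0xFFu);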
 
 static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
-                                       const InterpFrame *Frame,
-                                       const CallExpr *Call) {
-  QualType CallType = Call->getType();
-  if (!CallType->isIntegerType() ||
-      !Call->getArg(0)->getType()->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    QualType CallType = Call->getType();
+    if (!CallType->isIntegerType() ||
+            !Call->getArg(0)->getType()->isIntegerType())
+        return false;
 
-  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
-  pushInteger(S, Val.countLeadingZeros(), CallType);
-  return true;
+    APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
+    pushInteger(S, Val.countLeadingZeros(), CallType);
+    return true;
 }
 
 static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
-                                       const InterpFrame *Frame,
-                                       const CallExpr *Call) {
-  QualType CallType = Call->getType();
-  if (!CallType->isIntegerType() ||
-      !Call->getArg(0)->getType()->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    QualType CallType = Call->getType();
+    if (!CallType->isIntegerType() ||
+            !Call->getArg(0)->getType()->isIntegerType())
+        return false;
 
-  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
-  pushInteger(S, Val.countTrailingZeros(), CallType);
-  return true;
+    APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
+    pushInteger(S, Val.countTrailingZeros(), CallType);
+    return true;
 }
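
Both count handlers map directly onto APSInt; a small sketch under the same constexpr assumption (and the lzcnt/bmi target features):

  static_assert(__builtin_ia32_lzcnt_u32(1u) == 31u);  // a 32-bit 1 has 31 leading zeros
  static_assert(__builtin_ia32_tzcnt_u32(8u) == 3u);   // 0b1000 has 3 trailing zeros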
 
 static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
-                                      const InterpFrame *Frame,
-                                      const CallExpr *Call) {
-  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
-      !Call->getArg(1)->getType()->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
+            !Call->getArg(1)->getType()->isIntegerType())
+        return false;
 
-  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
-  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
+    PrimType ValT = *S.Ctx.classify(Call->getArg(0));
+    PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
 
-  APSInt Mask = popToAPSInt(S.Stk, MaskT);
-  APSInt Val = popToAPSInt(S.Stk, ValT);
+    APSInt Mask = popToAPSInt(S.Stk, MaskT);
+    APSInt Val = popToAPSInt(S.Stk, ValT);
 
-  unsigned BitWidth = Val.getBitWidth();
-  APInt Result = APInt::getZero(BitWidth);
-  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
-    if (Mask[I])
-      Result.setBitVal(I, Val[P++]);
-  }
-  pushInteger(S, std::move(Result), Call->getType());
-  return true;
+    unsigned BitWidth = Val.getBitWidth();
+    APInt Result = APInt::getZero(BitWidth);
+    for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
+        if (Mask[I])
+            Result.setBitVal(I, Val[P++]);
+    }
+    pushInteger(S, std::move(Result), Call->getType());
+    return true;
 }
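
A worked pdep example matching the loop above (sketch, assuming BMI2 and constexpr support):

  // Deposit the low source bits into the positions selected by the mask:
  // 0b0101 scattered into mask 0xF0 gives 0x50.
  static_assert(__builtin_ia32_pdep_si(0x5u, 0xF0u) == 0x50u);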
 
 static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
-                                      const InterpFrame *Frame,
-                                      const CallExpr *Call) {
-  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
-      !Call->getArg(1)->getType()->isIntegerType())
-    return false;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
+            !Call->getArg(1)->getType()->isIntegerType())
+        return false;
 
-  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
-  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
+    PrimType ValT = *S.Ctx.classify(Call->getArg(0));
+    PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
 
-  APSInt Mask = popToAPSInt(S.Stk, MaskT);
-  APSInt Val = popToAPSInt(S.Stk, ValT);
+    APSInt Mask = popToAPSInt(S.Stk, MaskT);
+    APSInt Val = popToAPSInt(S.Stk, ValT);
 
-  unsigned BitWidth = Val.getBitWidth();
-  APInt Result = APInt::getZero(BitWidth);
-  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
-    if (Mask[I])
-      Result.setBitVal(P++, Val[I]);
-  }
-  pushInteger(S, std::move(Result), Call->getType());
-  return true;
+    unsigned BitWidth = Val.getBitWidth();
+    APInt Result = APInt::getZero(BitWidth);
+    for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
+        if (Mask[I])
+            Result.setBitVal(P++, Val[I]);
+    }
+    pushInteger(S, std::move(Result), Call->getType());
+    return true;
 }
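
And pext is the inverse direction, gathering the masked bits back down (same assumptions):

  static_assert(__builtin_ia32_pext_si(0x50u, 0xF0u) == 0x5u);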
 
 /// (CarryIn, LHS, RHS, Result)
 static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
-                                                    CodePtr OpPC,
-                                                    const InterpFrame *Frame,
-                                                    const CallExpr *Call,
-                                                    unsigned BuiltinOp) {
-  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
-      !Call->getArg(1)->getType()->isIntegerType() ||
-      !Call->getArg(2)->getType()->isIntegerType())
-    return false;
+        CodePtr OpPC,
+        const InterpFrame *Frame,
+        const CallExpr *Call,
+        unsigned BuiltinOp) {
+    if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
+            !Call->getArg(1)->getType()->isIntegerType() ||
+            !Call->getArg(2)->getType()->isIntegerType())
+        return false;
 
-  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
+    const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
 
-  PrimType CarryInT = *S.getContext().classify(Call->getArg(0));
-  PrimType LHST = *S.getContext().classify(Call->getArg(1));
-  PrimType RHST = *S.getContext().classify(Call->getArg(2));
-  APSInt RHS = popToAPSInt(S.Stk, RHST);
-  APSInt LHS = popToAPSInt(S.Stk, LHST);
-  APSInt CarryIn = popToAPSInt(S.Stk, CarryInT);
+    PrimType CarryInT = *S.getContext().classify(Call->getArg(0));
+    PrimType LHST = *S.getContext().classify(Call->getArg(1));
+    PrimType RHST = *S.getContext().classify(Call->getArg(2));
+    APSInt RHS = popToAPSInt(S.Stk, RHST);
+    APSInt LHS = popToAPSInt(S.Stk, LHST);
+    APSInt CarryIn = popToAPSInt(S.Stk, CarryInT);
 
-  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
-               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
+    bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
+        BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
 
-  unsigned BitWidth = LHS.getBitWidth();
-  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
-  APInt ExResult =
-      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
-            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
+    unsigned BitWidth = LHS.getBitWidth();
+    unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
+    APInt ExResult =
+        IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
+        : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
 
-  APInt Result = ExResult.extractBits(BitWidth, 0);
-  APSInt CarryOut =
-      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
+    APInt Result = ExResult.extractBits(BitWidth, 0);
+    APSInt CarryOut =
+        APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
 
-  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
-  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
-  assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
+    QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
+    PrimType CarryOutT = *S.getContext().classify(CarryOutType);
+    assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
 
-  pushInteger(S, CarryOut, Call->getType());
+    pushInteger(S, CarryOut, Call->getType());
 
-  return true;
+    return true;
 }
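
A sketch of the carry-out behaviour, assuming these x86 builtins are usable in constant expressions as the handler implies (the pointer argument receives the truncated sum):

  constexpr unsigned char carry_of(unsigned a, unsigned b) {
    unsigned sum = 0;
    return __builtin_ia32_addcarryx_u32(/*carry-in=*/0, a, b, &sum);
  }
  static_assert(carry_of(0xFFFFFFFFu, 1u) == 1);  // the addition wraps, so carry-out is 1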
 
 static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
-                                                      CodePtr OpPC,
-                                                      const InterpFrame *Frame,
-                                                      const CallExpr *Call) {
-  analyze_os_log::OSLogBufferLayout Layout;
-  analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
-  pushInteger(S, Layout.size().getQuantity(), Call->getType());
-  return true;
+        CodePtr OpPC,
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    analyze_os_log::OSLogBufferLayout Layout;
+    analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
+    pushInteger(S, Layout.size().getQuantity(), Call->getType());
+    return true;
 }
 
 static bool
 interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
-                                             const InterpFrame *Frame,
-                                             const CallExpr *Call) {
-  const auto &Ptr = S.Stk.pop<Pointer>();
-  assert(Ptr.getFieldDesc()->isPrimitiveArray());
-
-  // This should be created for a StringLiteral, so should alway shold at least
-  // one array element.
-  assert(Ptr.getFieldDesc()->getNumElems() >= 1);
-  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
-  uint64_t Result = getPointerAuthStableSipHash(R);
-  pushInteger(S, Result, Call->getType());
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    const auto &Ptr = S.Stk.pop<Pointer>();
+    assert(Ptr.getFieldDesc()->isPrimitiveArray());
+
+    // This should be created for a StringLiteral, so it should always hold at
+    // least one array element.
+    assert(Ptr.getFieldDesc()->getNumElems() >= 1);
+    StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
+    uint64_t Result = getPointerAuthStableSipHash(R);
+    pushInteger(S, Result, Call->getType());
+    return true;
 }
 
 static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
-                                         const InterpFrame *Frame,
-                                         const CallExpr *Call) {
-  // A call to __operator_new is only valid within std::allocate<>::allocate.
-  // Walk up the call stack to find the appropriate caller and get the
-  // element type from it.
-  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
-
-  if (ElemType.isNull()) {
-    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
-                       ? diag::note_constexpr_new_untyped
-                       : diag::note_constexpr_new);
-    return false;
-  }
-  assert(NewCall);
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    // A call to __operator_new is only valid within std::allocator<T>::allocate.
+    // Walk up the call stack to find the appropriate caller and get the
+    // element type from it.
+    auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
+
+    if (ElemType.isNull()) {
+        S.FFDiag(Call, S.getLangOpts().CPlusPlus20
+                ? diag::note_constexpr_new_untyped
+                : diag::note_constexpr_new);
+        return false;
+    }
+    assert(NewCall);
 
-  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
-    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
-        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
-    return false;
-  }
+    if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
+        S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
+            << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
+        return false;
+    }
 
-  // We only care about the first parameter (the size), so discard all the
-  // others.
-  {
-    unsigned NumArgs = Call->getNumArgs();
-    assert(NumArgs >= 1);
-
-    // The std::nothrow_t arg never gets put on the stack.
-    if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
-      --NumArgs;
-    auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
-    // First arg is needed.
-    Args = Args.drop_front();
-
-    // Discard the rest.
-    for (const Expr *Arg : Args)
-      discard(S.Stk, *S.getContext().classify(Arg));
-  }
+    // We only care about the first parameter (the size), so discard all the
+    // others.
+    {
+        unsigned NumArgs = Call->getNumArgs();
+        assert(NumArgs >= 1);
+
+        // The std::nothrow_t arg never gets put on the stack.
+        if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
+            --NumArgs;
+        auto Args = ArrayRef(Call->getArgs(), NumArgs);
+        // The first arg (the size) is still needed, so don't discard it.
+        Args = Args.drop_front();
+
+        // Discard the rest.
+        for (const Expr *Arg : Args)
+            discard(S.Stk, *S.getContext().classify(Arg));
+    }
 
-  APSInt Bytes = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
-  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
-  assert(!ElemSize.isZero());
-  // Divide the number of bytes by sizeof(ElemType), so we get the number of
-  // elements we should allocate.
-  APInt NumElems, Remainder;
-  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
-  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
-  if (Remainder != 0) {
-    // This likely indicates a bug in the implementation of 'std::allocator'.
-    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
-        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
-    return false;
-  }
+    APSInt Bytes = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
+    CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
+    assert(!ElemSize.isZero());
+    // Divide the number of bytes by sizeof(ElemType), so we get the number of
+    // elements we should allocate.
+    APInt NumElems, Remainder;
+    APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
+    APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
+    if (Remainder != 0) {
+        // This likely indicates a bug in the implementation of 'std::allocator'.
+        S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
+            << Bytes << APSInt(ElemSizeAP, true) << ElemType;
+        return false;
+    }
 
-  // NB: The same check we're using in CheckArraySize()
-  if (NumElems.getActiveBits() >
-          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
-      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
-    // FIXME: NoThrow check?
-    const SourceInfo &Loc = S.Current->getSource(OpPC);
-    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
-        << NumElems.getZExtValue();
-    return false;
-  }
+    // NB: The same check we're using in CheckArraySize()
+    if (NumElems.getActiveBits() >
+            ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
+            NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
+        // FIXME: NoThrow check?
+        const SourceInfo &Loc = S.Current->getSource(OpPC);
+        S.FFDiag(Loc, diag::note_constexpr_new_too_large)
+            << NumElems.getZExtValue();
+        return false;
+    }
 
-  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
-    return false;
+    if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
+        return false;
 
-  bool IsArray = NumElems.ugt(1);
-  OptPrimType ElemT = S.getContext().classify(ElemType);
-  DynamicAllocator &Allocator = S.getAllocator();
-  if (ElemT) {
-    Block *B =
-        Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
-                           S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
-    assert(B);
-    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
-    return true;
-  }
+    bool IsArray = NumElems.ugt(1);
+    OptPrimType ElemT = S.getContext().classify(ElemType);
+    DynamicAllocator &Allocator = S.getAllocator();
+    if (ElemT) {
+        Block *B =
+            Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
+                    S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
+        assert(B);
+        S.Stk.push<Pointer>(Pointer(B).atIndex(0));
+        return true;
+    }
+
+    assert(!ElemT);
+
+    // Composite arrays
+    if (IsArray) {
+        const Descriptor *Desc =
+            S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
+        Block *B =
+            Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
+                    DynamicAllocator::Form::Operator);
+        assert(B);
+        S.Stk.push<Pointer>(Pointer(B).atIndex(0));
+        return true;
+    }
 
-  assert(!ElemT);
+    // Records. Still allocate them as single-element arrays.
+    QualType AllocType = S.getASTContext().getConstantArrayType(
+            ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
 
-  // Composite arrays
-  if (IsArray) {
-    const Descriptor *Desc =
-        S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
-    Block *B =
-        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
-                           DynamicAllocator::Form::Operator);
+    const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
+            Descriptor::InlineDescMD);
+    Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
+            DynamicAllocator::Form::Operator);
     assert(B);
-    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
+    S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
     return true;
-  }
-
-  // Records. Still allocate them as single-element arrays.
-  QualType AllocType = S.getASTContext().getConstantArrayType(
-      ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
-
-  const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
-                                                Descriptor::InlineDescMD);
-  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
-                                DynamicAllocator::Form::Operator);
-  assert(B);
-  S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
-  return true;
 }
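
These allocation handlers are what back C++20 constexpr use of std::allocator; a standard, self-contained sketch (not specific to this patch):

  #include <memory>

  constexpr int alloc_roundtrip() {
    std::allocator<int> a;
    int *p = a.allocate(1);
    std::construct_at(p, 7);   // start the lifetime of the int
    int r = *p;
    std::destroy_at(p);
    a.deallocate(p, 1);
    return r;
  }
  static_assert(alloc_roundtrip() == 7);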
 
 static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
-                                            const InterpFrame *Frame,
-                                            const CallExpr *Call) {
-  const Expr *Source = nullptr;
-  const Block *BlockToDelete = nullptr;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    const Expr *Source = nullptr;
+    const Block *BlockToDelete = nullptr;
 
-  if (S.checkingPotentialConstantExpression()) {
-    S.Stk.discard<Pointer>();
-    return false;
-  }
+    if (S.checkingPotentialConstantExpression()) {
+        S.Stk.discard<Pointer>();
+        return false;
+    }
 
-  // This is permitted only within a call to std::allocator<T>::deallocate.
-  if (!S.getStdAllocatorCaller("deallocate")) {
-    S.FFDiag(Call);
-    S.Stk.discard<Pointer>();
-    return true;
-  }
+    // This is permitted only within a call to std::allocator<T>::deallocate.
+    if (!S.getStdAllocatorCaller("deallocate")) {
+        S.FFDiag(Call);
+        S.Stk.discard<Pointer>();
+        return true;
+    }
 
-  {
-    const Pointer &Ptr = S.Stk.pop<Pointer>();
+    {
+        const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-    if (Ptr.isZero()) {
-      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
-      return true;
-    }
+        if (Ptr.isZero()) {
+            S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
+            return true;
+        }
 
-    Source = Ptr.getDeclDesc()->asExpr();
-    BlockToDelete = Ptr.block();
+        Source = Ptr.getDeclDesc()->asExpr();
+        BlockToDelete = Ptr.block();
 
-    if (!BlockToDelete->isDynamic()) {
-      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
-          << Ptr.toDiagnosticString(S.getASTContext());
-      if (const auto *D = Ptr.getFieldDesc()->asDecl())
-        S.Note(D->getLocation(), diag::note_declared_at);
+        if (!BlockToDelete->isDynamic()) {
+            S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
+                << Ptr.toDiagnosticString(S.getASTContext());
+            if (const auto *D = Ptr.getFieldDesc()->asDecl())
+                S.Note(D->getLocation(), diag::note_declared_at);
+        }
     }
-  }
-  assert(BlockToDelete);
+    assert(BlockToDelete);
 
-  DynamicAllocator &Allocator = S.getAllocator();
-  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
-  std::optional<DynamicAllocator::Form> AllocForm =
-      Allocator.getAllocationForm(Source);
+    DynamicAllocator &Allocator = S.getAllocator();
+    const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
+    std::optional<DynamicAllocator::Form> AllocForm =
+        Allocator.getAllocationForm(Source);
 
-  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
-    // Nothing has been deallocated, this must be a double-delete.
-    const SourceInfo &Loc = S.Current->getSource(OpPC);
-    S.FFDiag(Loc, diag::note_constexpr_double_delete);
-    return false;
-  }
-  assert(AllocForm);
+    if (!Allocator.deallocate(Source, BlockToDelete, S)) {
+        // Nothing has been deallocated; this must be a double-delete.
+        const SourceInfo &Loc = S.Current->getSource(OpPC);
+        S.FFDiag(Loc, diag::note_constexpr_double_delete);
+        return false;
+    }
+    assert(AllocForm);
 
-  return CheckNewDeleteForms(
-      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
+    return CheckNewDeleteForms(
+            S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
 }
 
 static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
-                                             const InterpFrame *Frame,
-                                             const CallExpr *Call) {
-  const Floating &Arg0 = S.Stk.pop<Floating>();
-  S.Stk.push<Floating>(Arg0);
-  return true;
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    const Floating &Arg0 = S.Stk.pop<Floating>();
+    S.Stk.push<Floating>(Arg0);
+    return true;
 }
 
 static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
-                                          const CallExpr *Call, unsigned ID) {
-  const Pointer &Arg = S.Stk.pop<Pointer>();
-  assert(Arg.getFieldDesc()->isPrimitiveArray());
+        const CallExpr *Call, unsigned ID) {
+    const Pointer &Arg = S.Stk.pop<Pointer>();
+    assert(Arg.getFieldDesc()->isPrimitiveArray());
 
-  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
-  assert(Call->getType() == ElemType);
-  PrimType ElemT = *S.getContext().classify(ElemType);
-  unsigned NumElems = Arg.getNumElems();
-
-  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
-    T Result = Arg.elem<T>(0);
-    unsigned BitWidth = Result.bitWidth();
-    for (unsigned I = 1; I != NumElems; ++I) {
-      T Elem = Arg.elem<T>(I);
-      T PrevResult = Result;
-
-      if (ID == Builtin::BI__builtin_reduce_add) {
-        if (T::add(Result, Elem, BitWidth, &Result)) {
-          unsigned OverflowBits = BitWidth + 1;
-          (void)handleOverflow(S, OpPC,
-                               (PrevResult.toAPSInt(OverflowBits) +
-                                Elem.toAPSInt(OverflowBits)));
-          return false;
-        }
-      } else if (ID == Builtin::BI__builtin_reduce_mul) {
-        if (T::mul(Result, Elem, BitWidth, &Result)) {
-          unsigned OverflowBits = BitWidth * 2;
-          (void)handleOverflow(S, OpPC,
-                               (PrevResult.toAPSInt(OverflowBits) *
-                                Elem.toAPSInt(OverflowBits)));
-          return false;
-        }
+    QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+    assert(Call->getType() == ElemType);
+    PrimType ElemT = *S.getContext().classify(ElemType);
+    unsigned NumElems = Arg.getNumElems();
 
-      } else if (ID == Builtin::BI__builtin_reduce_and) {
-        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
-      } else if (ID == Builtin::BI__builtin_reduce_or) {
-        (void)T::bitOr(Result, Elem, BitWidth, &Result);
-      } else if (ID == Builtin::BI__builtin_reduce_xor) {
-        (void)T::bitXor(Result, Elem, BitWidth, &Result);
-      } else if (ID == Builtin::BI__builtin_reduce_min) {
-        if (Elem < Result)
-          Result = Elem;
-      } else if (ID == Builtin::BI__builtin_reduce_max) {
-        if (Elem > Result)
-          Result = Elem;
-      } else {
-        llvm_unreachable("Unhandled vector reduce builtin");
-      }
-    }
-    pushInteger(S, Result.toAPSInt(), Call->getType());
-  });
+    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+            T Result = Arg.elem<T>(0);
+            unsigned BitWidth = Result.bitWidth();
+            for (unsigned I = 1; I != NumElems; ++I) {
+            T Elem = Arg.elem<T>(I);
+            T PrevResult = Result;
+
+            if (ID == Builtin::BI__builtin_reduce_add) {
+            if (T::add(Result, Elem, BitWidth, &Result)) {
+            unsigned OverflowBits = BitWidth + 1;
+            (void)handleOverflow(S, OpPC,
+                    (PrevResult.toAPSInt(OverflowBits) +
+                     Elem.toAPSInt(OverflowBits)));
+            return false;
+            }
+            } else if (ID == Builtin::BI__builtin_reduce_mul) {
+            if (T::mul(Result, Elem, BitWidth, &Result)) {
+            unsigned OverflowBits = BitWidth * 2;
+            (void)handleOverflow(S, OpPC,
+                    (PrevResult.toAPSInt(OverflowBits) *
+                     Elem.toAPSInt(OverflowBits)));
+            return false;
+            }
+
+            } else if (ID == Builtin::BI__builtin_reduce_and) {
+                (void)T::bitAnd(Result, Elem, BitWidth, &Result);
+            } else if (ID == Builtin::BI__builtin_reduce_or) {
+                (void)T::bitOr(Result, Elem, BitWidth, &Result);
+            } else if (ID == Builtin::BI__builtin_reduce_xor) {
+                (void)T::bitXor(Result, Elem, BitWidth, &Result);
+            } else if (ID == Builtin::BI__builtin_reduce_min) {
+                if (Elem < Result)
+                    Result = Elem;
+            } else if (ID == Builtin::BI__builtin_reduce_max) {
+                if (Elem > Result)
+                    Result = Elem;
+            } else {
+                llvm_unreachable("Unhandled vector reduce builtin");
+            }
+            }
+            pushInteger(S, Result.toAPSInt(), Call->getType());
+    });
 
-  return true;
+    return true;
 }
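
A minimal sketch of the reductions this evaluates (assuming the reduce builtins are constexpr-enabled, as the handler implies):

  typedef int v4i __attribute__((ext_vector_type(4)));
  constexpr v4i v = {1, 2, 3, 4};
  static_assert(__builtin_reduce_add(v) == 10);
  static_assert(__builtin_reduce_max(v) == 4);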
 
 static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
-                                            const InterpFrame *Frame,
-                                            const CallExpr *Call,
-                                            unsigned BuiltinID) {
-  // FIXME: add support of floating point
-  assert(!Call->getArg(0)->getType()->isFloatingType() &&
-         "floating point is currently not supported");
-
-  assert(Call->getNumArgs() == 1);
-  if (Call->getArg(0)->getType()->isIntegerType()) {
-    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-    APSInt Val = popToAPSInt(S.Stk, ArgT);
-
-    pushInteger(S, Val.abs(), Call->getType());
-    return true;
-  }
-
-  // Otherwise, the argument must be a vector.
-  assert(Call->getArg(0)->getType()->isVectorType());
-  const Pointer &Arg = S.Stk.pop<Pointer>();
-  assert(Arg.getFieldDesc()->isPrimitiveArray());
-  const Pointer &Dst = S.Stk.peek<Pointer>();
-  assert(Dst.getFieldDesc()->isPrimitiveArray());
-  assert(Arg.getFieldDesc()->getNumElems() ==
-         Dst.getFieldDesc()->getNumElems());
+        const InterpFrame *Frame,
+        const CallExpr *Call,
+        unsigned BuiltinID) {
+    assert(Call->getNumArgs() == 1);
+    QualType Ty = Call->getArg(0)->getType();
+    if (Ty->isIntegerType()) {
+        PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+        APSInt Val = popToAPSInt(S.Stk, ArgT);
+        pushInteger(S, Val.abs(), Call->getType());
+        return true;
+    }
 
-  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
-  PrimType ElemT = *S.getContext().classify(ElemType);
-  unsigned NumElems = Arg.getNumElems();
+    if (Ty->isFloatingType()) {
+        Floating Val = S.Stk.pop<Floating>();
+        Floating Result = abs(S, Val);
+        S.Stk.push<Floating>(Result);
+        return true;
+    }
 
-  // FIXME: Reading from uninitialized vector elements?
-  for (unsigned I = 0; I != NumElems; ++I) {
-    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
-      Dst.elem<T>(I) = T::from(static_cast<T>(
-          APSInt(Arg.elem<T>(I).toAPSInt().abs(),
-                 ElemType->isUnsignedIntegerOrEnumerationType())));
-    });
-  }
-  Dst.initializeAllElements();
+    // Otherwise, the argument must be a vector.
+    assert(Call->getArg(0)->getType()->isVectorType());
+    const Pointer &Arg = S.Stk.pop<Pointer>();
+    assert(Arg.getFieldDesc()->isPrimitiveArray());
+    const Pointer &Dst = S.Stk.peek<Pointer>();
+    assert(Dst.getFieldDesc()->isPrimitiveArray());
+    assert(Arg.getFieldDesc()->getNumElems() ==
+            Dst.getFieldDesc()->getNumElems());
+
+    QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+    PrimType ElemT = *S.getContext().classify(ElemType);
+    unsigned NumElems = Arg.getNumElems();
+    // The vector elements are either integers or floating-point values.
+    for (unsigned I = 0; I != NumElems; ++I) {
+        if (ElemType->isIntegerType()) {
+            INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+                    Dst.elem<T>(I) = T::from(static_cast<T>(
+                                APSInt(Arg.elem<T>(I).toAPSInt().abs(),
+                                    ElemType->isUnsignedIntegerOrEnumerationType())));
+                    });
+        } else {
+            Floating Val = Arg.elem<Floating>(I);
+            Dst.elem<Floating>(I) = abs(S, Val);
+        }
+    }
+    Dst.initializeAllElements();
 
-  return true;
+    return true;
 }
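
For the record, this is the kind of evaluation the change enables: integer operands and, with the floating-point path added here, float and vector operands:

  static_assert(__builtin_elementwise_abs(-10) == 10);
  static_assert(__builtin_elementwise_abs(-1.5) == 1.5);

  typedef int v4i __attribute__((ext_vector_type(4)));
  constexpr v4i in = {1, -2, 3, -4};
  constexpr v4i out = __builtin_elementwise_abs(in);
  static_assert(out[1] == 2 && out[3] == 4);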
 
 /// Can be called with an integer or vector as the first and only parameter.
 static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
-                                                 const InterpFrame *Frame,
-                                                 const CallExpr *Call,
-                                                 unsigned BuiltinID) {
-  assert(Call->getNumArgs() == 1);
-  if (Call->getArg(0)->getType()->isIntegerType()) {
-    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
-    APSInt Val = popToAPSInt(S.Stk, ArgT);
-
-    if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
-      pushInteger(S, Val.popcount(), Call->getType());
-    } else {
-      pushInteger(S, Val.reverseBits(), Call->getType());
+        const InterpFrame *Frame,
+        const CallExpr *Call,
+        unsigned BuiltinID) {
+    assert(Call->getNumArgs() == 1);
+    if (Call->getArg(0)->getType()->isIntegerType()) {
+        PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+        APSInt Val = popToAPSInt(S.Stk, ArgT);
+
+        if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
+            pushInteger(S, Val.popcount(), Call->getType());
+        } else {
+            pushInteger(S, Val.reverseBits(), Call->getType());
+        }
+        return true;
     }
-    return true;
-  }
-  // Otherwise, the argument must be a vector.
-  assert(Call->getArg(0)->getType()->isVectorType());
-  const Pointer &Arg = S.Stk.pop<Pointer>();
-  assert(Arg.getFieldDesc()->isPrimitiveArray());
-  const Pointer &Dst = S.Stk.peek<Pointer>();
-  assert(Dst.getFieldDesc()->isPrimitiveArray());
-  assert(Arg.getFieldDesc()->getNumElems() ==
-         Dst.getFieldDesc()->getNumElems());
-
-  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
-  PrimType ElemT = *S.getContext().classify(ElemType);
-  unsigned NumElems = Arg.getNumElems();
-
-  // FIXME: Reading from uninitialized vector elements?
-  for (unsigned I = 0; I != NumElems; ++I) {
-    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
-      if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
-        Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
-      } else {
-        Dst.elem<T>(I) =
-            T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
-      }
-    });
-  }
-  Dst.initializeAllElements();
+    // Otherwise, the argument must be a vector.
+    assert(Call->getArg(0)->getType()->isVectorType());
+    const Pointer &Arg = S.Stk.pop<Pointer>();
+    assert(Arg.getFieldDesc()->isPrimitiveArray());
+    const Pointer &Dst = S.Stk.peek<Pointer>();
+    assert(Dst.getFieldDesc()->isPrimitiveArray());
+    assert(Arg.getFieldDesc()->getNumElems() ==
+            Dst.getFieldDesc()->getNumElems());
+
+    QualType ElemType = Arg.getFieldDesc()->getElemQualType();
+    PrimType ElemT = *S.getContext().classify(ElemType);
+    unsigned NumElems = Arg.getNumElems();
+
+    // FIXME: Reading from uninitialized vector elements?
+    for (unsigned I = 0; I != NumElems; ++I) {
+        INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+                if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
+                Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
+                } else {
+                Dst.elem<T>(I) =
+                T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
+                }
+                });
+    }
+    Dst.initializeAllElements();
 
-  return true;
+    return true;
 }
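
Similarly for the popcount/bitreverse pair handled here, both of which are already constexpr-callable per the existing documentation:

  static_assert(__builtin_elementwise_popcount(0xF0u) == 4);
  static_assert(__builtin_elementwise_bitreverse(1u) == 0x80000000u);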
 
 static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call, unsigned ID) {
-  assert(Call->getNumArgs() == 3);
-  const ASTContext &ASTCtx = S.getASTContext();
-  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
-  APSInt Size = popToAPSInt(S.Stk, SizeT);
-  const Pointer SrcPtr = S.Stk.pop<Pointer>();
-  const Pointer DestPtr = S.Stk.pop<Pointer>();
-
-  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
-
-  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
-    diagnoseNonConstexprBuiltin(S, OpPC, ID);
-
-  bool Move =
-      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
-       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
-  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
-               ID == Builtin::BI__builtin_wmemcpy ||
-               ID == Builtin::BI__builtin_wmemmove;
-
-  // If the size is zero, we treat this as always being a valid no-op.
-  if (Size.isZero()) {
-    S.Stk.push<Pointer>(DestPtr);
-    return true;
-  }
+        const InterpFrame *Frame,
+        const CallExpr *Call, unsigned ID) {
+    assert(Call->getNumArgs() == 3);
+    const ASTContext &ASTCtx = S.getASTContext();
+    PrimType SizeT = *S.getContext().classify(Call->getArg(2));
+    APSInt Size = popToAPSInt(S.Stk, SizeT);
+    const Pointer SrcPtr = S.Stk.pop<Pointer>();
+    const Pointer DestPtr = S.Stk.pop<Pointer>();
+
+    assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
+
+    if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
+        diagnoseNonConstexprBuiltin(S, OpPC, ID);
+
+    bool Move =
+        (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
+         ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
+    bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
+        ID == Builtin::BI__builtin_wmemcpy ||
+        ID == Builtin::BI__builtin_wmemmove;
+
+    // If the size is zero, we treat this as always being a valid no-op.
+    if (Size.isZero()) {
+        S.Stk.push<Pointer>(DestPtr);
+        return true;
+    }
 
-  if (SrcPtr.isZero() || DestPtr.isZero()) {
-    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
-        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
-        << DiagPtr.toDiagnosticString(ASTCtx);
-    return false;
-  }
+    if (SrcPtr.isZero() || DestPtr.isZero()) {
+        Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
+        S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
+            << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
+            << DiagPtr.toDiagnosticString(ASTCtx);
+        return false;
+    }
 
-  // Diagnose integral src/dest pointers specially.
-  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
-    std::string DiagVal = "(void *)";
-    DiagVal += SrcPtr.isIntegralPointer()
-                   ? std::to_string(SrcPtr.getIntegerRepresentation())
-                   : std::to_string(DestPtr.getIntegerRepresentation());
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
-        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
-    return false;
-  }
+    // Diagnose integral src/dest pointers specially.
+    if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
+        std::string DiagVal = "(void *)";
+        DiagVal += SrcPtr.isIntegralPointer()
+            ? std::to_string(SrcPtr.getIntegerRepresentation())
+            : std::to_string(DestPtr.getIntegerRepresentation());
+        S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
+            << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
+        return false;
+    }
 
-  // Can't read from dummy pointers.
-  if (DestPtr.isDummy() || SrcPtr.isDummy())
-    return false;
+    // Can't read from dummy pointers.
+    if (DestPtr.isDummy() || SrcPtr.isDummy())
+        return false;
 
-  QualType DestElemType = getElemType(DestPtr);
-  if (DestElemType->isIncompleteType()) {
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_ltor_incomplete_type)
-        << DestElemType;
-    return false;
-  }
+    QualType DestElemType = getElemType(DestPtr);
+    if (DestElemType->isIncompleteType()) {
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_ltor_incomplete_type)
+            << DestElemType;
+        return false;
+    }
 
-  size_t RemainingDestElems;
-  if (DestPtr.getFieldDesc()->isArray()) {
-    RemainingDestElems = DestPtr.isUnknownSizeArray()
-                             ? 0
-                             : (DestPtr.getNumElems() - DestPtr.getIndex());
-  } else {
-    RemainingDestElems = 1;
-  }
-  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
+    size_t RemainingDestElems;
+    if (DestPtr.getFieldDesc()->isArray()) {
+        RemainingDestElems = DestPtr.isUnknownSizeArray()
+            ? 0
+            : (DestPtr.getNumElems() - DestPtr.getIndex());
+    } else {
+        RemainingDestElems = 1;
+    }
+    unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
 
-  if (WChar) {
-    uint64_t WCharSize =
-        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
-    Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
-                   /*IsUnsigend=*/true);
-  }
+    if (WChar) {
+        uint64_t WCharSize =
+            ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
+        Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
+                /*IsUnsigned=*/true);
+    }
 
-  if (Size.urem(DestElemSize) != 0) {
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_memcpy_unsupported)
-        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
-    return false;
-  }
+    if (Size.urem(DestElemSize) != 0) {
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_memcpy_unsupported)
+            << Move << WChar << 0 << DestElemType << Size << DestElemSize;
+        return false;
+    }
 
-  QualType SrcElemType = getElemType(SrcPtr);
-  size_t RemainingSrcElems;
-  if (SrcPtr.getFieldDesc()->isArray()) {
-    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
-                            ? 0
-                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
-  } else {
-    RemainingSrcElems = 1;
-  }
-  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
+    QualType SrcElemType = getElemType(SrcPtr);
+    size_t RemainingSrcElems;
+    if (SrcPtr.getFieldDesc()->isArray()) {
+        RemainingSrcElems = SrcPtr.isUnknownSizeArray()
+            ? 0
+            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
+    } else {
+        RemainingSrcElems = 1;
+    }
+    unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
 
-  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
-        << Move << SrcElemType << DestElemType;
-    return false;
-  }
+    if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
+        S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
+            << Move << SrcElemType << DestElemType;
+        return false;
+    }
 
-  if (DestElemType->isIncompleteType() ||
-      DestPtr.getType()->isIncompleteType()) {
-    QualType DiagType =
-        DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_memcpy_incomplete_type)
-        << Move << DiagType;
-    return false;
-  }
+    if (DestElemType->isIncompleteType() ||
+            DestPtr.getType()->isIncompleteType()) {
+        QualType DiagType =
+            DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_memcpy_incomplete_type)
+            << Move << DiagType;
+        return false;
+    }
 
-  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
-        << Move << DestElemType;
-    return false;
-  }
+    if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
+        S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
+            << Move << DestElemType;
+        return false;
+    }
 
-  // Check if we have enough elements to read from and write to.
-  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
-  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
-  if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
-    APInt N = Size.udiv(DestElemSize);
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_memcpy_unsupported)
-        << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
-        << DestElemType << toString(N, 10, /*Signed=*/false);
-    return false;
-  }
+    // Check if we have enough elements to read from and write to.
+    size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
+    size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
+    if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
+        APInt N = Size.udiv(DestElemSize);
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_memcpy_unsupported)
+            << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
+            << DestElemType << toString(N, 10, /*Signed=*/false);
+        return false;
+    }
 
-  // Check for overlapping memory regions.
-  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
-    // Remove base casts.
-    Pointer SrcP = SrcPtr;
-    while (SrcP.isBaseClass())
-      SrcP = SrcP.getBase();
-
-    Pointer DestP = DestPtr;
-    while (DestP.isBaseClass())
-      DestP = DestP.getBase();
-
-    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
-    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
-    unsigned N = Size.getZExtValue();
-
-    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
-        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
-      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
-          << /*IsWChar=*/false;
-      return false;
+    // Check for overlapping memory regions.
+    if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
+        // Remove base casts.
+        Pointer SrcP = SrcPtr;
+        while (SrcP.isBaseClass())
+            SrcP = SrcP.getBase();
+
+        Pointer DestP = DestPtr;
+        while (DestP.isBaseClass())
+            DestP = DestP.getBase();
+
+        unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
+        unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
+        unsigned N = Size.getZExtValue();
+
+        if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
+                (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
+            S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
+                << /*IsWChar=*/false;
+            return false;
+        }
     }
-  }
 
-  assert(Size.getZExtValue() % DestElemSize == 0);
-  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
-    return false;
+    assert(Size.getZExtValue() % DestElemSize == 0);
+    if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
+        return false;
 
-  S.Stk.push<Pointer>(DestPtr);
-  return true;
+    S.Stk.push<Pointer>(DestPtr);
+    return true;
 }
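
A standard constexpr sketch of what this handler supports (plain __builtin_memcpy between arrays of the same element type):

  constexpr int copy_last() {
    int src[3] = {1, 2, 3};
    int dst[3] = {};
    __builtin_memcpy(dst, src, sizeof src);
    return dst[2];
  }
  static_assert(copy_last() == 3);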
 
 /// Determine if T is a character type for which we guarantee that
 /// sizeof(T) == 1.
 static bool isOneByteCharacterType(QualType T) {
-  return T->isCharType() || T->isChar8Type();
+    return T->isCharType() || T->isChar8Type();
 }
 
 static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
-                                   const InterpFrame *Frame,
-                                   const CallExpr *Call, unsigned ID) {
-  assert(Call->getNumArgs() == 3);
-  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
-  const APSInt &Size = popToAPSInt(S.Stk, SizeT);
-  const Pointer &PtrB = S.Stk.pop<Pointer>();
-  const Pointer &PtrA = S.Stk.pop<Pointer>();
-
-  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
-      ID == Builtin::BIwmemcmp)
-    diagnoseNonConstexprBuiltin(S, OpPC, ID);
-
-  if (Size.isZero()) {
-    pushInteger(S, 0, Call->getType());
-    return true;
-  }
+        const InterpFrame *Frame,
+        const CallExpr *Call, unsigned ID) {
+    assert(Call->getNumArgs() == 3);
+    PrimType SizeT = *S.getContext().classify(Call->getArg(2));
+    const APSInt &Size = popToAPSInt(S.Stk, SizeT);
+    const Pointer &PtrB = S.Stk.pop<Pointer>();
+    const Pointer &PtrA = S.Stk.pop<Pointer>();
+
+    if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
+            ID == Builtin::BIwmemcmp)
+        diagnoseNonConstexprBuiltin(S, OpPC, ID);
+
+    if (Size.isZero()) {
+        pushInteger(S, 0, Call->getType());
+        return true;
+    }
 
-  bool IsWide =
-      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
-
-  const ASTContext &ASTCtx = S.getASTContext();
-  QualType ElemTypeA = getElemType(PtrA);
-  QualType ElemTypeB = getElemType(PtrB);
-  // FIXME: This is an arbitrary limitation the current constant interpreter
-  // had. We could remove this.
-  if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
-                  !isOneByteCharacterType(ElemTypeB))) {
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_memcmp_unsupported)
-        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
-        << PtrB.getType();
-    return false;
-  }
+    bool IsWide =
+        (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
+
+    const ASTContext &ASTCtx = S.getASTContext();
+    QualType ElemTypeA = getElemType(PtrA);
+    QualType ElemTypeB = getElemType(PtrB);
+    // FIXME: This is an arbitrary limitation the current constant interpreter
+    // had. We could remove this.
+    if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
+                !isOneByteCharacterType(ElemTypeB))) {
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_memcmp_unsupported)
+            << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
+            << PtrB.getType();
+        return false;
+    }
 
-  if (PtrA.isDummy() || PtrB.isDummy())
-    return false;
+    if (PtrA.isDummy() || PtrB.isDummy())
+        return false;
 
-  // Now, read both pointers to a buffer and compare those.
-  BitcastBuffer BufferA(
-      Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
-  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
-  // FIXME: The swapping here is UNDOING something we do when reading the
-  // data into the buffer.
-  if (ASTCtx.getTargetInfo().isBigEndian())
-    swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
-
-  BitcastBuffer BufferB(
-      Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
-  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
-  // FIXME: The swapping here is UNDOING something we do when reading the
-  // data into the buffer.
-  if (ASTCtx.getTargetInfo().isBigEndian())
-    swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
-
-  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
-                                  BufferB.byteSize().getQuantity());
-
-  unsigned ElemSize = 1;
-  if (IsWide)
-    ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
-  // The Size given for the wide variants is in wide-char units. Convert it
-  // to bytes.
-  size_t ByteSize = Size.getZExtValue() * ElemSize;
-  size_t CmpSize = std::min(MinBufferSize, ByteSize);
-
-  for (size_t I = 0; I != CmpSize; I += ElemSize) {
-    if (IsWide) {
-      INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
-        T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
-        T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
-        if (A < B) {
-          pushInteger(S, -1, Call->getType());
-          return true;
-        }
-        if (A > B) {
-          pushInteger(S, 1, Call->getType());
-          return true;
+    // Now, read both pointers to a buffer and compare those.
+    BitcastBuffer BufferA(
+            Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
+    readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
+    // FIXME: The swapping here is UNDOING something we do when reading the
+    // data into the buffer.
+    if (ASTCtx.getTargetInfo().isBigEndian())
+        swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
+
+    BitcastBuffer BufferB(
+            Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
+    readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
+    // FIXME: The swapping here is UNDOING something we do when reading the
+    // data into the buffer.
+    if (ASTCtx.getTargetInfo().isBigEndian())
+        swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
+
+    size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
+            BufferB.byteSize().getQuantity());
+
+    unsigned ElemSize = 1;
+    if (IsWide)
+        ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
+    // The Size given for the wide variants is in wide-char units. Convert it
+    // to bytes.
+    size_t ByteSize = Size.getZExtValue() * ElemSize;
+    size_t CmpSize = std::min(MinBufferSize, ByteSize);
+
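+    // Compare one element per iteration: a whole wide character for the
+    // wmemcmp variants, a single byte otherwise.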
+    for (size_t I = 0; I != CmpSize; I += ElemSize) {
+        if (IsWide) {
+            INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
+                    T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
+                    T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
+                    if (A < B) {
+                    pushInteger(S, -1, Call->getType());
+                    return true;
+                    }
+                    if (A > B) {
+                    pushInteger(S, 1, Call->getType());
+                    return true;
+                    }
+                    });
+        } else {
+            std::byte A = BufferA.Data[I];
+            std::byte B = BufferB.Data[I];
+
+            if (A < B) {
+                pushInteger(S, -1, Call->getType());
+                return true;
+            }
+            if (A > B) {
+                pushInteger(S, 1, Call->getType());
+                return true;
+            }
         }
-      });
-    } else {
-      std::byte A = BufferA.Data[I];
-      std::byte B = BufferB.Data[I];
+    }
 
-      if (A < B) {
-        pushInteger(S, -1, Call->getType());
+    // We compared CmpSize bytes above. If the limiting factor was the Size
+    // passed, we're done and the result is equality (0).
+    if (ByteSize <= CmpSize) {
+        pushInteger(S, 0, Call->getType());
         return true;
-      }
-      if (A > B) {
-        pushInteger(S, 1, Call->getType());
-        return true;
-      }
     }
-  }
-
-  // We compared CmpSize bytes above. If the limiting factor was the Size
-  // passed, we're done and the result is equality (0).
-  if (ByteSize <= CmpSize) {
-    pushInteger(S, 0, Call->getType());
-    return true;
-  }
 
-  // However, if we read all the available bytes but were instructed to read
-  // even more, diagnose this as a "read of dereferenced one-past-the-end
-  // pointer". This is what would happen if we called CheckLoad() on every array
-  // element.
-  S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
-      << AK_Read << S.Current->getRange(OpPC);
-  return false;
+    // However, if we read all the available bytes but were instructed to read
+    // even more, diagnose this as a "read of dereferenced one-past-the-end
+    // pointer". This is what would happen if we called CheckLoad() on every array
+    // element.
+    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
+        << AK_Read << S.Current->getRange(OpPC);
+    return false;
 }
 
 // __builtin_memchr(ptr, int, int)
 // __builtin_strchr(ptr, int)
 static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
-                                   const CallExpr *Call, unsigned ID) {
-  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
-      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
-    diagnoseNonConstexprBuiltin(S, OpPC, ID);
-
-  std::optional<APSInt> MaxLength;
-  PrimType DesiredT = *S.getContext().classify(Call->getArg(1));
-  if (Call->getNumArgs() == 3) {
-    PrimType MaxT = *S.getContext().classify(Call->getArg(2));
-    MaxLength = popToAPSInt(S.Stk, MaxT);
-  }
-  APSInt Desired = popToAPSInt(S.Stk, DesiredT);
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
+        const CallExpr *Call, unsigned ID) {
+    if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
+            ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
+        diagnoseNonConstexprBuiltin(S, OpPC, ID);
+
+    std::optional<APSInt> MaxLength;
+    PrimType DesiredT = *S.getContext().classify(Call->getArg(1));
+    if (Call->getNumArgs() == 3) {
+        PrimType MaxT = *S.getContext().classify(Call->getArg(2));
+        MaxLength = popToAPSInt(S.Stk, MaxT);
+    }
+    APSInt Desired = popToAPSInt(S.Stk, DesiredT);
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  if (MaxLength && MaxLength->isZero()) {
-    S.Stk.push<Pointer>();
-    return true;
-  }
+    if (MaxLength && MaxLength->isZero()) {
+        S.Stk.push<Pointer>();
+        return true;
+    }
 
-  if (Ptr.isDummy())
-    return false;
+    if (Ptr.isDummy())
+        return false;
 
-  // Null is only okay if the given size is 0.
-  if (Ptr.isZero()) {
-    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
-        << AK_Read;
-    return false;
-  }
+    // Null is only okay if the given size is 0.
+    if (Ptr.isZero()) {
+        S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
+            << AK_Read;
+        return false;
+    }
 
-  QualType ElemTy = Ptr.getFieldDesc()->isArray()
-                        ? Ptr.getFieldDesc()->getElemQualType()
-                        : Ptr.getFieldDesc()->getType();
-  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
+    QualType ElemTy = Ptr.getFieldDesc()->isArray()
+        ? Ptr.getFieldDesc()->getElemQualType()
+        : Ptr.getFieldDesc()->getType();
+    bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
 
-  // Give up on byte-oriented matching against multibyte elements.
-  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
-    S.FFDiag(S.Current->getSource(OpPC),
-             diag::note_constexpr_memchr_unsupported)
-        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
-    return false;
-  }
+    // Give up on byte-oriented matching against multibyte elements.
+    if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
+        S.FFDiag(S.Current->getSource(OpPC),
+                diag::note_constexpr_memchr_unsupported)
+            << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
+        return false;
+    }
 
-  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
-    // strchr compares directly to the passed integer, and therefore
-    // always fails if given an int that is not a char.
-    if (Desired !=
-        Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
-      S.Stk.push<Pointer>();
-      return true;
+    if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
+        // strchr compares directly to the passed integer, and therefore
+        // always fails if given an int that is not a char.
+        if (Desired !=
+                Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
+            S.Stk.push<Pointer>();
+            return true;
+        }
     }
-  }
 
-  uint64_t DesiredVal;
-  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
-      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
-    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
-    DesiredVal = Desired.getZExtValue();
-  } else {
-    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
-  }
+    uint64_t DesiredVal;
+    if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
+            ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
+        // wcschr and wmemchr are given a wchar_t to look for. Just use it.
+        DesiredVal = Desired.getZExtValue();
+    } else {
+        DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
+    }
 
-  bool StopAtZero =
-      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
-       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
+    bool StopAtZero =
+        (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
+         ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
 
-  PrimType ElemT =
-      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
+    PrimType ElemT =
+        IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
 
-  size_t Index = Ptr.getIndex();
-  size_t Step = 0;
-  for (;;) {
-    const Pointer &ElemPtr =
-        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
+    size_t Index = Ptr.getIndex();
+    size_t Step = 0;
+    for (;;) {
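+        // Walk the array element by element until the desired value is
+        // found, a terminating null is hit (strchr/wcschr), or MaxLength is
+        // exhausted.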
+        const Pointer &ElemPtr =
+            (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
 
-    if (!CheckLoad(S, OpPC, ElemPtr))
-      return false;
+        if (!CheckLoad(S, OpPC, ElemPtr))
+            return false;
 
-    uint64_t V;
-    INT_TYPE_SWITCH_NO_BOOL(
-        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
+        uint64_t V;
+        INT_TYPE_SWITCH_NO_BOOL(
+                ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
 
-    if (V == DesiredVal) {
-      S.Stk.push<Pointer>(ElemPtr);
-      return true;
-    }
+        if (V == DesiredVal) {
+            S.Stk.push<Pointer>(ElemPtr);
+            return true;
+        }
 
-    if (StopAtZero && V == 0)
-      break;
+        if (StopAtZero && V == 0)
+            break;
 
-    ++Step;
-    if (MaxLength && Step == MaxLength->getZExtValue())
-      break;
-  }
+        ++Step;
+        if (MaxLength && Step == MaxLength->getZExtValue())
+            break;
+    }
 
-  S.Stk.push<Pointer>();
-  return true;
+    S.Stk.push<Pointer>();
+    return true;
 }
 
 static unsigned computeFullDescSize(const ASTContext &ASTCtx,
-                                    const Descriptor *Desc) {
+        const Descriptor *Desc) {
 
-  if (Desc->isPrimitive())
-    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
+    if (Desc->isPrimitive())
+        return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
 
-  if (Desc->isArray())
-    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
-           Desc->getNumElems();
+    if (Desc->isArray())
+        return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
+            Desc->getNumElems();
 
-  if (Desc->isRecord())
-    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
+    if (Desc->isRecord())
+        return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
 
-  llvm_unreachable("Unhandled descriptor type");
-  return 0;
+    llvm_unreachable("Unhandled descriptor type");
+    return 0;
 }
 
 static unsigned computePointerOffset(const ASTContext &ASTCtx,
-                                     const Pointer &Ptr) {
-  unsigned Result = 0;
-
-  Pointer P = Ptr;
-  while (P.isArrayElement() || P.isField()) {
-    P = P.expand();
-    const Descriptor *D = P.getFieldDesc();
-
-    if (P.isArrayElement()) {
-      unsigned ElemSize =
-          ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
-      if (P.isOnePastEnd())
-        Result += ElemSize * P.getNumElems();
-      else
-        Result += ElemSize * P.getIndex();
-      P = P.expand().getArray();
-    } else if (P.isBaseClass()) {
-
-      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
-      bool IsVirtual = Ptr.isVirtualBaseClass();
-      P = P.getBase();
-      const Record *BaseRecord = P.getRecord();
-
-      const ASTRecordLayout &Layout =
-          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
-      if (IsVirtual)
-        Result += Layout.getVBaseClassOffset(RD).getQuantity();
-      else
-        Result += Layout.getBaseClassOffset(RD).getQuantity();
-    } else if (P.isField()) {
-      const FieldDecl *FD = P.getField();
-      const ASTRecordLayout &Layout =
-          ASTCtx.getASTRecordLayout(FD->getParent());
-      unsigned FieldIndex = FD->getFieldIndex();
-      uint64_t FieldOffset =
-          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
-              .getQuantity();
-      Result += FieldOffset;
-      P = P.getBase();
-    } else
-      llvm_unreachable("Unhandled descriptor type");
-  }
+        const Pointer &Ptr) {
+    unsigned Result = 0;
+
+    Pointer P = Ptr;
+    while (P.isArrayElement() || P.isField()) {
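+        // Walk from the designated subobject up to the complete object,
+        // accumulating the byte offset contributed by each level.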
+        P = P.expand();
+        const Descriptor *D = P.getFieldDesc();
+
+        if (P.isArrayElement()) {
+            unsigned ElemSize =
+                ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
+            if (P.isOnePastEnd())
+                Result += ElemSize * P.getNumElems();
+            else
+                Result += ElemSize * P.getIndex();
+            P = P.expand().getArray();
+        } else if (P.isBaseClass()) {
+
+            const auto *RD = cast<CXXRecordDecl>(D->asDecl());
+            bool IsVirtual = Ptr.isVirtualBaseClass();
+            P = P.getBase();
+            const Record *BaseRecord = P.getRecord();
+
+            const ASTRecordLayout &Layout =
+                ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
+            if (IsVirtual)
+                Result += Layout.getVBaseClassOffset(RD).getQuantity();
+            else
+                Result += Layout.getBaseClassOffset(RD).getQuantity();
+        } else if (P.isField()) {
+            const FieldDecl *FD = P.getField();
+            const ASTRecordLayout &Layout =
+                ASTCtx.getASTRecordLayout(FD->getParent());
+            unsigned FieldIndex = FD->getFieldIndex();
+            uint64_t FieldOffset =
+                ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
+                .getQuantity();
+            Result += FieldOffset;
+            P = P.getBase();
+        } else
+            llvm_unreachable("Unhandled descriptor type");
+    }
 
-  return Result;
+    return Result;
 }
 
 static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
-                                        const InterpFrame *Frame,
-                                        const CallExpr *Call) {
-  PrimType KindT = *S.getContext().classify(Call->getArg(1));
-  [[maybe_unused]] unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();
+        const InterpFrame *Frame,
+        const CallExpr *Call) {
+    PrimType KindT = *S.getContext().classify(Call->getArg(1));
+    [[maybe_unused]] unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();
 
-  assert(Kind <= 3 && "unexpected kind");
+    assert(Kind <= 3 && "unexpected kind");
 
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  if (Ptr.isZero())
-    return false;
+    if (Ptr.isZero())
+        return false;
 
-  const Descriptor *DeclDesc = Ptr.getDeclDesc();
-  if (!DeclDesc)
-    return false;
+    const Descriptor *DeclDesc = Ptr.getDeclDesc();
+    if (!DeclDesc)
+        return false;
 
-  const ASTContext &ASTCtx = S.getASTContext();
+    const ASTContext &ASTCtx = S.getASTContext();
 
-  unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
-  unsigned FullSize = computeFullDescSize(ASTCtx, DeclDesc);
+    unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
+    unsigned FullSize = computeFullDescSize(ASTCtx, DeclDesc);
 
-  pushInteger(S, FullSize - ByteOffset, Call->getType());
+    pushInteger(S, FullSize - ByteOffset, Call->getType());
 
-  return true;
+    return true;
 }
 
 static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
-                                               const CallExpr *Call) {
+        const CallExpr *Call) {
 
-  if (!S.inConstantContext())
-    return false;
+    if (!S.inConstantContext())
+        return false;
 
-  const Pointer &Ptr = S.Stk.pop<Pointer>();
-
-  auto Error = [&](int Diag) {
-    bool CalledFromStd = false;
-    const auto *Callee = S.Current->getCallee();
-    if (Callee && Callee->isInStdNamespace()) {
-      const IdentifierInfo *Identifier = Callee->getIdentifier();
-      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
-    }
-    S.CCEDiag(CalledFromStd
-                  ? S.Current->Caller->getSource(S.Current->getRetPC())
-                  : S.Current->getSource(OpPC),
-              diag::err_invalid_is_within_lifetime)
-        << (CalledFromStd ? "std::is_within_lifetime"
-                          : "__builtin_is_within_lifetime")
-        << Diag;
-    return false;
-  };
+    const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  if (Ptr.isZero())
-    return Error(0);
-  if (Ptr.isOnePastEnd())
-    return Error(1);
+    auto Error = [&](int Diag) {
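+        // Diag selects the reason reported below: 0 for a null pointer, 1 for
+        // a one-past-the-end pointer, 2 when the pointee is still being
+        // initialized.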
+        bool CalledFromStd = false;
+        const auto *Callee = S.Current->getCallee();
+        if (Callee && Callee->isInStdNamespace()) {
+            const IdentifierInfo *Identifier = Callee->getIdentifier();
+            CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
+        }
+        S.CCEDiag(CalledFromStd
+                ? S.Current->Caller->getSource(S.Current->getRetPC())
+                : S.Current->getSource(OpPC),
+                diag::err_invalid_is_within_lifetime)
+            << (CalledFromStd ? "std::is_within_lifetime"
+                    : "__builtin_is_within_lifetime")
+            << Diag;
+        return false;
+    };
 
-  bool Result = Ptr.getLifetime() != Lifetime::Ended;
-  if (!Ptr.isActive()) {
-    Result = false;
-  } else {
-    if (!CheckLive(S, OpPC, Ptr, AK_Read))
-      return false;
-    if (!CheckMutable(S, OpPC, Ptr))
-      return false;
-    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
-      return false;
-  }
+    if (Ptr.isZero())
+        return Error(0);
+    if (Ptr.isOnePastEnd())
+        return Error(1);
 
-  // Check if we're currently running an initializer.
-  for (InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) {
-    if (const Function *F = Frame->getFunction();
-        F && F->isConstructor() && Frame->getThis().block() == Ptr.block()) {
-      return Error(2);
+    bool Result = Ptr.getLifetime() != Lifetime::Ended;
+    if (!Ptr.isActive()) {
+        Result = false;
+    } else {
+        if (!CheckLive(S, OpPC, Ptr, AK_Read))
+            return false;
+        if (!CheckMutable(S, OpPC, Ptr))
+            return false;
+        if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
+            return false;
     }
-  }
-  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
-    return Error(2);
 
-  pushInteger(S, Result, Call->getType());
-  return true;
-}
-
-static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
-                                            const CallExpr *Call,
-                                            unsigned BuiltinID) {
-  assert(Call->getNumArgs() == 2);
-
-  // Single integer case.
-  if (!Call->getArg(0)->getType()->isVectorType()) {
-    assert(!Call->getArg(1)->getType()->isVectorType());
-    APSInt RHS = popToAPSInt(
-        S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
-    APSInt LHS = popToAPSInt(
-        S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
-    APInt Result;
-    if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
-      Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
-    } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
-      Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
-    } else {
-      llvm_unreachable("Wrong builtin ID");
+    // Check if we're currently running an initializer.
+    for (InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) {
+        if (const Function *F = Frame->getFunction();
+                F && F->isConstructor() && Frame->getThis().block() == Ptr.block()) {
+            return Error(2);
+        }
     }
+    if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
+        return Error(2);
 
-    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
+    pushInteger(S, Result, Call->getType());
     return true;
-  }
+}
 
-  // Vector case.
-  assert(Call->getArg(0)->getType()->isVectorType() &&
-         Call->getArg(1)->getType()->isVectorType());
-  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
-  assert(VT->getElementType() ==
-         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
-  assert(VT->getNumElements() ==
-         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
-  assert(VT->getElementType()->isIntegralOrEnumerationType());
-
-  const Pointer &RHS = S.Stk.pop<Pointer>();
-  const Pointer &LHS = S.Stk.pop<Pointer>();
-  const Pointer &Dst = S.Stk.peek<Pointer>();
-  PrimType ElemT = *S.getContext().classify(VT->getElementType());
-  unsigned NumElems = VT->getNumElements();
-  for (unsigned I = 0; I != NumElems; ++I) {
-    APSInt Elem1;
-    APSInt Elem2;
-    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
-      Elem1 = LHS.elem<T>(I).toAPSInt();
-      Elem2 = RHS.elem<T>(I).toAPSInt();
-    });
+static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
+        const CallExpr *Call,
+        unsigned BuiltinID) {
+    assert(Call->getNumArgs() == 2);
+
+    // Single integer case.
+    if (!Call->getArg(0)->getType()->isVectorType()) {
+        assert(!Call->getArg(1)->getType()->isVectorType());
+        APSInt RHS = popToAPSInt(
+                S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
+        APSInt LHS = popToAPSInt(
+                S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
+        APInt Result;
+        if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
+            Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
+        } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
+            Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
+        } else {
+            llvm_unreachable("Wrong builtin ID");
+        }
 
-    APSInt Result;
-    if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
-      Result = APSInt(Elem1.isSigned() ? Elem1.sadd_sat(Elem2)
-                                       : Elem1.uadd_sat(Elem2),
-                      Call->getType()->isUnsignedIntegerOrEnumerationType());
-    } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
-      Result = APSInt(Elem1.isSigned() ? Elem1.ssub_sat(Elem2)
-                                       : Elem1.usub_sat(Elem2),
-                      Call->getType()->isUnsignedIntegerOrEnumerationType());
-    } else {
-      llvm_unreachable("Wrong builtin ID");
+        pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
+        return true;
     }
 
-    INT_TYPE_SWITCH_NO_BOOL(ElemT,
-                            { Dst.elem<T>(I) = static_cast<T>(Result); });
-  }
-  Dst.initializeAllElements();
+    // Vector case.
+    assert(Call->getArg(0)->getType()->isVectorType() &&
+            Call->getArg(1)->getType()->isVectorType());
+    const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+    assert(VT->getElementType() ==
+            Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
+    assert(VT->getNumElements() ==
+            Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
+    assert(VT->getElementType()->isIntegralOrEnumerationType());
+
+    const Pointer &RHS = S.Stk.pop<Pointer>();
+    const Pointer &LHS = S.Stk.pop<Pointer>();
+    const Pointer &Dst = S.Stk.peek<Pointer>();
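+    // The destination vector is peeked rather than popped; it stays on the
+    // stack as the call result once every element has been written below.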
+    PrimType ElemT = *S.getContext().classify(VT->getElementType());
+    unsigned NumElems = VT->getNumElements();
+    for (unsigned I = 0; I != NumElems; ++I) {
+        APSInt Elem1;
+        APSInt Elem2;
+        INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+                Elem1 = LHS.elem<T>(I).toAPSInt();
+                Elem2 = RHS.elem<T>(I).toAPSInt();
+                });
+
+        APSInt Result;
+        if (BuiltinID == Builtin::BI__builtin_elementwise_add_sat) {
+            Result = APSInt(Elem1.isSigned() ? Elem1.sadd_sat(Elem2)
+                    : Elem1.uadd_sat(Elem2),
+                    Call->getType()->isUnsignedIntegerOrEnumerationType());
+        } else if (BuiltinID == Builtin::BI__builtin_elementwise_sub_sat) {
+            Result = APSInt(Elem1.isSigned() ? Elem1.ssub_sat(Elem2)
+                    : Elem1.usub_sat(Elem2),
+                    Call->getType()->isUnsignedIntegerOrEnumerationType());
+        } else {
+            llvm_unreachable("Wrong builtin ID");
+        }
 
-  return true;
+        INT_TYPE_SWITCH_NO_BOOL(ElemT,
+                { Dst.elem<T>(I) = static_cast<T>(Result); });
+    }
+    Dst.initializeAllElements();
+
+    return true;
 }
 
 static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC,
-                                               const CallExpr *Call,
-                                               unsigned BuiltinID) {
-  assert(Call->getNumArgs() == 2);
+        const CallExpr *Call,
+        unsigned BuiltinID) {
+    assert(Call->getNumArgs() == 2);
 
-  QualType Arg0Type = Call->getArg(0)->getType();
+    QualType Arg0Type = Call->getArg(0)->getType();
 
-  // TODO: Support floating-point types.
-  if (!(Arg0Type->isIntegerType() ||
-        (Arg0Type->isVectorType() &&
-         Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
-    return false;
+    // TODO: Support floating-point types.
+    if (!(Arg0Type->isIntegerType() ||
+                (Arg0Type->isVectorType() &&
+                 Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
+        return false;
 
-  if (!Arg0Type->isVectorType()) {
-    assert(!Call->getArg(1)->getType()->isVectorType());
-    APSInt RHS = popToAPSInt(
-        S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
-    APSInt LHS = popToAPSInt(
-        S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
-    APInt Result;
-    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
-      Result = std::max(LHS, RHS);
-    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
-      Result = std::min(LHS, RHS);
-    } else {
-      llvm_unreachable("Wrong builtin ID");
-    }
+    if (!Arg0Type->isVectorType()) {
+        assert(!Call->getArg(1)->getType()->isVectorType());
+        APSInt RHS = popToAPSInt(
+                S.Stk, *S.getContext().classify(Call->getArg(1)->getType()));
+        APSInt LHS = popToAPSInt(
+                S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
+        APInt Result;
+        if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
+            Result = std::max(LHS, RHS);
+        } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
+            Result = std::min(LHS, RHS);
+        } else {
+            llvm_unreachable("Wrong builtin ID");
+        }
 
-    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
-    return true;
-  }
+        pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
+        return true;
+    }
 
-  // Vector case.
-  assert(Call->getArg(0)->getType()->isVectorType() &&
-         Call->getArg(1)->getType()->isVectorType());
-  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
-  assert(VT->getElementType() ==
-         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
-  assert(VT->getNumElements() ==
-         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
-  assert(VT->getElementType()->isIntegralOrEnumerationType());
-
-  const Pointer &RHS = S.Stk.pop<Pointer>();
-  const Pointer &LHS = S.Stk.pop<Pointer>();
-  const Pointer &Dst = S.Stk.peek<Pointer>();
-  PrimType ElemT = *S.getContext().classify(VT->getElementType());
-  unsigned NumElems = VT->getNumElements();
-  for (unsigned I = 0; I != NumElems; ++I) {
-    APSInt Elem1;
-    APSInt Elem2;
-    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
-      Elem1 = LHS.elem<T>(I).toAPSInt();
-      Elem2 = RHS.elem<T>(I).toAPSInt();
-    });
+    // Vector case.
+    assert(Call->getArg(0)->getType()->isVectorType() &&
+            Call->getArg(1)->getType()->isVectorType());
+    const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+    assert(VT->getElementType() ==
+            Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
+    assert(VT->getNumElements() ==
+            Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
+    assert(VT->getElementType()->isIntegralOrEnumerationType());
+
+    const Pointer &RHS = S.Stk.pop<Pointer>();
+    const Pointer &LHS = S.Stk.pop<Pointer>();
+    const Pointer &Dst = S.Stk.peek<Pointer>();
+    PrimType ElemT = *S.getContext().classify(VT->getElementType());
+    unsigned NumElems = VT->getNumElements();
+    for (unsigned I = 0; I != NumElems; ++I) {
+        APSInt Elem1;
+        APSInt Elem2;
+        INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+                Elem1 = LHS.elem<T>(I).toAPSInt();
+                Elem2 = RHS.elem<T>(I).toAPSInt();
+                });
+
+        APSInt Result;
+        if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
+            Result = APSInt(std::max(Elem1, Elem2),
+                    Call->getType()->isUnsignedIntegerOrEnumerationType());
+        } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
+            Result = APSInt(std::min(Elem1, Elem2),
+                    Call->getType()->isUnsignedIntegerOrEnumerationType());
+        } else {
+            llvm_unreachable("Wrong builtin ID");
+        }
 
-    APSInt Result;
-    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
-      Result = APSInt(std::max(Elem1, Elem2),
-                      Call->getType()->isUnsignedIntegerOrEnumerationType());
-    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
-      Result = APSInt(std::min(Elem1, Elem2),
-                      Call->getType()->isUnsignedIntegerOrEnumerationType());
-    } else {
-      llvm_unreachable("Wrong builtin ID");
+        INT_TYPE_SWITCH_NO_BOOL(ElemT,
+                { Dst.elem<T>(I) = static_cast<T>(Result); });
     }
+    Dst.initializeAllElements();
 
-    INT_TYPE_SWITCH_NO_BOOL(ElemT,
-                            { Dst.elem<T>(I) = static_cast<T>(Result); });
-  }
-  Dst.initializeAllElements();
-
-  return true;
+    return true;
 }
 
 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
-                      uint32_t BuiltinID) {
-  if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
-    return Invalid(S, OpPC);
-
-  const InterpFrame *Frame = S.Current;
-  switch (BuiltinID) {
-  case Builtin::BI__builtin_is_constant_evaluated:
-    return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_assume:
-  case Builtin::BI__assume:
-    return interp__builtin_assume(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_strcmp:
-  case Builtin::BIstrcmp:
-  case Builtin::BI__builtin_strncmp:
-  case Builtin::BIstrncmp:
-  case Builtin::BI__builtin_wcsncmp:
-  case Builtin::BIwcsncmp:
-  case Builtin::BI__builtin_wcscmp:
-  case Builtin::BIwcscmp:
-    return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
-
-  case Builtin::BI__builtin_strlen:
-  case Builtin::BIstrlen:
-  case Builtin::BI__builtin_wcslen:
-  case Builtin::BIwcslen:
-    return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
-
-  case Builtin::BI__builtin_nan:
-  case Builtin::BI__builtin_nanf:
-  case Builtin::BI__builtin_nanl:
-  case Builtin::BI__builtin_nanf16:
-  case Builtin::BI__builtin_nanf128:
-    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
-
-  case Builtin::BI__builtin_nans:
-  case Builtin::BI__builtin_nansf:
-  case Builtin::BI__builtin_nansl:
-  case Builtin::BI__builtin_nansf16:
-  case Builtin::BI__builtin_nansf128:
-    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
-
-  case Builtin::BI__builtin_huge_val:
-  case Builtin::BI__builtin_huge_valf:
-  case Builtin::BI__builtin_huge_vall:
-  case Builtin::BI__builtin_huge_valf16:
-  case Builtin::BI__builtin_huge_valf128:
-  case Builtin::BI__builtin_inf:
-  case Builtin::BI__builtin_inff:
-  case Builtin::BI__builtin_infl:
-  case Builtin::BI__builtin_inff16:
-  case Builtin::BI__builtin_inff128:
-    return interp__builtin_inf(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_copysign:
-  case Builtin::BI__builtin_copysignf:
-  case Builtin::BI__builtin_copysignl:
-  case Builtin::BI__builtin_copysignf128:
-    return interp__builtin_copysign(S, OpPC, Frame);
-
-  case Builtin::BI__builtin_fmin:
-  case Builtin::BI__builtin_fminf:
-  case Builtin::BI__builtin_fminl:
-  case Builtin::BI__builtin_fminf16:
-  case Builtin::BI__builtin_fminf128:
-    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
-
-  case Builtin::BI__builtin_fminimum_num:
-  case Builtin::BI__builtin_fminimum_numf:
-  case Builtin::BI__builtin_fminimum_numl:
-  case Builtin::BI__builtin_fminimum_numf16:
-  case Builtin::BI__builtin_fminimum_numf128:
-    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
-
-  case Builtin::BI__builtin_fmax:
-  case Builtin::BI__builtin_fmaxf:
-  case Builtin::BI__builtin_fmaxl:
-  case Builtin::BI__builtin_fmaxf16:
-  case Builtin::BI__builtin_fmaxf128:
-    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
-
-  case Builtin::BI__builtin_fmaximum_num:
-  case Builtin::BI__builtin_fmaximum_numf:
-  case Builtin::BI__builtin_fmaximum_numl:
-  case Builtin::BI__builtin_fmaximum_numf16:
-  case Builtin::BI__builtin_fmaximum_numf128:
-    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
-
-  case Builtin::BI__builtin_isnan:
-    return interp__builtin_isnan(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_issignaling:
-    return interp__builtin_issignaling(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_isinf:
-    return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
-
-  case Builtin::BI__builtin_isinf_sign:
-    return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
-
-  case Builtin::BI__builtin_isfinite:
-    return interp__builtin_isfinite(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_isnormal:
-    return interp__builtin_isnormal(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_issubnormal:
-    return interp__builtin_issubnormal(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_iszero:
-    return interp__builtin_iszero(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_signbit:
-  case Builtin::BI__builtin_signbitf:
-  case Builtin::BI__builtin_signbitl:
-    return interp__builtin_signbit(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_isgreater:
-  case Builtin::BI__builtin_isgreaterequal:
-  case Builtin::BI__builtin_isless:
-  case Builtin::BI__builtin_islessequal:
-  case Builtin::BI__builtin_islessgreater:
-  case Builtin::BI__builtin_isunordered:
-    return interp_floating_comparison(S, OpPC, Call, BuiltinID);
-
-  case Builtin::BI__builtin_isfpclass:
-    return interp__builtin_isfpclass(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_fpclassify:
-    return interp__builtin_fpclassify(S, OpPC, Frame, Call);
-
-  case Builtin::BI__builtin_fabs:
-  case Builtin::BI__builtin_fabsf:
-  case Builtin::BI__builtin_fabsl:
-  case Builtin::BI__builtin_fabsf128:
-    return interp__builtin_fabs(S, OpPC, Frame);
+        uint32_t BuiltinID) {
+    if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
+        return Invalid(S, OpPC);
+
+    const InterpFrame *Frame = S.Current;
+    switch (BuiltinID) {
+        case Builtin::BI__builtin_is_constant_evaluated:
+            return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_assume:
+        case Builtin::BI__assume:
+            return interp__builtin_assume(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_strcmp:
+        case Builtin::BIstrcmp:
+        case Builtin::BI__builtin_strncmp:
+        case Builtin::BIstrncmp:
+        case Builtin::BI__builtin_wcsncmp:
+        case Builtin::BIwcsncmp:
+        case Builtin::BI__builtin_wcscmp:
+        case Builtin::BIwcscmp:
+            return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
+
+        case Builtin::BI__builtin_strlen:
+        case Builtin::BIstrlen:
+        case Builtin::BI__builtin_wcslen:
+        case Builtin::BIwcslen:
+            return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
+
+        case Builtin::BI__builtin_nan:
+        case Builtin::BI__builtin_nanf:
+        case Builtin::BI__builtin_nanl:
+        case Builtin::BI__builtin_nanf16:
+        case Builtin::BI__builtin_nanf128:
+            return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
+
+        case Builtin::BI__builtin_nans:
+        case Builtin::BI__builtin_nansf:
+        case Builtin::BI__builtin_nansl:
+        case Builtin::BI__builtin_nansf16:
+        case Builtin::BI__builtin_nansf128:
+            return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
+
+        case Builtin::BI__builtin_huge_val:
+        case Builtin::BI__builtin_huge_valf:
+        case Builtin::BI__builtin_huge_vall:
+        case Builtin::BI__builtin_huge_valf16:
+        case Builtin::BI__builtin_huge_valf128:
+        case Builtin::BI__builtin_inf:
+        case Builtin::BI__builtin_inff:
+        case Builtin::BI__builtin_infl:
+        case Builtin::BI__builtin_inff16:
+        case Builtin::BI__builtin_inff128:
+            return interp__builtin_inf(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_copysign:
+        case Builtin::BI__builtin_copysignf:
+        case Builtin::BI__builtin_copysignl:
+        case Builtin::BI__builtin_copysignf128:
+            return interp__builtin_copysign(S, OpPC, Frame);
+
+        case Builtin::BI__builtin_fmin:
+        case Builtin::BI__builtin_fminf:
+        case Builtin::BI__builtin_fminl:
+        case Builtin::BI__builtin_fminf16:
+        case Builtin::BI__builtin_fminf128:
+            return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
+
+        case Builtin::BI__builtin_fminimum_num:
+        case Builtin::BI__builtin_fminimum_numf:
+        case Builtin::BI__builtin_fminimum_numl:
+        case Builtin::BI__builtin_fminimum_numf16:
+        case Builtin::BI__builtin_fminimum_numf128:
+            return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
+
+        case Builtin::BI__builtin_fmax:
+        case Builtin::BI__builtin_fmaxf:
+        case Builtin::BI__builtin_fmaxl:
+        case Builtin::BI__builtin_fmaxf16:
+        case Builtin::BI__builtin_fmaxf128:
+            return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
+
+        case Builtin::BI__builtin_fmaximum_num:
+        case Builtin::BI__builtin_fmaximum_numf:
+        case Builtin::BI__builtin_fmaximum_numl:
+        case Builtin::BI__builtin_fmaximum_numf16:
+        case Builtin::BI__builtin_fmaximum_numf128:
+            return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
+
+        case Builtin::BI__builtin_isnan:
+            return interp__builtin_isnan(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_issignaling:
+            return interp__builtin_issignaling(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_isinf:
+            return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
+
+        case Builtin::BI__builtin_isinf_sign:
+            return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
+
+        case Builtin::BI__builtin_isfinite:
+            return interp__builtin_isfinite(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_isnormal:
+            return interp__builtin_isnormal(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_issubnormal:
+            return interp__builtin_issubnormal(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_iszero:
+            return interp__builtin_iszero(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_signbit:
+        case Builtin::BI__builtin_signbitf:
+        case Builtin::BI__builtin_signbitl:
+            return interp__builtin_signbit(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_isgreater:
+        case Builtin::BI__builtin_isgreaterequal:
+        case Builtin::BI__builtin_isless:
+        case Builtin::BI__builtin_islessequal:
+        case Builtin::BI__builtin_islessgreater:
+        case Builtin::BI__builtin_isunordered:
+            return interp_floating_comparison(S, OpPC, Call, BuiltinID);
+
+        case Builtin::BI__builtin_isfpclass:
+            return interp__builtin_isfpclass(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_fpclassify:
+            return interp__builtin_fpclassify(S, OpPC, Frame, Call);
+
+        case Builtin::BI__builtin_fabs:
+        case Builtin::BI__builtin_fabsf:
+        case Builtin::BI__builtin_fabsl:
+        case Builtin::BI__builtin_fabsf128:
+            return interp__builtin_fabs(S, OpPC, Frame);
 
   case Builtin::BI__builtin_abs:
   case Builtin::BI__builtin_labs:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 1c8d9706788c4..d1cdfc16c5ef7 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11610,7 +11610,6 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
   switch (E->getBuiltinCallee()) {
   default:
     return false;
-  case Builtin::BI__builtin_elementwise_abs:
   case Builtin::BI__builtin_elementwise_popcount:
   case Builtin::BI__builtin_elementwise_bitreverse: {
     APValue Source;
@@ -11635,18 +11634,35 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
             APValue(APSInt(Elt.reverseBits(),
                            DestEltTy->isUnsignedIntegerOrEnumerationType())));
         break;
-      case Builtin::BI__builtin_elementwise_abs: {
-        APInt Val = Source.getVectorElt(EltNum).getInt().abs();
-        ResultElements.push_back(APValue(
-            APSInt(Val, DestEltTy->isUnsignedIntegerOrEnumerationType())));
-        break;
-      }
       }
     }
 
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
 
+  case Builtin::BI__builtin_elementwise_abs: {
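+    // Unlike the integer-only elementwise builtins handled above, abs also
+    // accepts floating-point (vector) operands, so it is folded separately
+    // here.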
+    APValue Source;
+    if (!EvaluateAsRValue(Info, E->getArg(0), Source))
+      return false;
+
+    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+    unsigned SourceLen = Source.getVectorLength();
+    SmallVector<APValue, 4> ResultElements;
+    ResultElements.reserve(SourceLen);
+
+    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
+      APValue CurrentEle = Source.getVectorElt(EltNum);
+      APValue Val = DestEltTy->isFloatingType()
+                        ? APValue(llvm::abs(CurrentEle.getFloat()))
+                        : APValue(APSInt(
+                              CurrentEle.getInt().abs(),
+                              DestEltTy->isUnsignedIntegerOrEnumerationType()));
+      ResultElements.push_back(Val);
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+
   case Builtin::BI__builtin_elementwise_add_sat:
   case Builtin::BI__builtin_elementwise_sub_sat:
   case clang::X86::BI__builtin_ia32_pmulhuw128:
@@ -15894,6 +15910,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
       return Error(E);
     return true;
 
+  case Builtin::BI__builtin_elementwise_abs:
   case Builtin::BI__builtin_fabs:
   case Builtin::BI__builtin_fabsf:
   case Builtin::BI__builtin_fabsl:
diff --git a/clang/test/Sema/constant-builtins-vector.cpp b/clang/test/Sema/constant-builtins-vector.cpp
index 85fb4930d09c0..2b7d76e36ce96 100644
--- a/clang/test/Sema/constant-builtins-vector.cpp
+++ b/clang/test/Sema/constant-builtins-vector.cpp
@@ -880,5 +880,17 @@ static_assert(__builtin_bit_cast(unsigned long long, __builtin_elementwise_min((
 static_assert(__builtin_elementwise_abs(10) == 10);
 static_assert(__builtin_elementwise_abs(-10) == 10);
 static_assert(__builtin_bit_cast(unsigned, __builtin_elementwise_abs((vector4char){-1, -2, -3, 4})) == (LITTLE_END ? 0x04030201 : 0x01020304));
-// the absolute value of the most negative integer remains the most negative integer
-static_assert(__builtin_elementwise_abs((int)(-2147483648)) == (int)(-2147483648));
+static_assert(__builtin_elementwise_abs((int)(-2147483648)) == (int)(-2147483648)); // the absolute value of the most negative integer remains the most negative integer
+
+// Check elementwise abs on floating-point vectors within a small tolerance.
+#define CHECK_FOUR_FLOAT_VEC(vec1, vec2)                                      \
+  static_assert(__builtin_fabs(vec1[0] - vec2[0]) < 1e-6);                    \
+  static_assert(__builtin_fabs(vec1[1] - vec2[1]) < 1e-6);                    \
+  static_assert(__builtin_fabs(vec1[2] - vec2[2]) < 1e-6);                    \
+  static_assert(__builtin_fabs(vec1[3] - vec2[3]) < 1e-6);
+
+// Check float and double vectors.
+CHECK_FOUR_FLOAT_VEC(__builtin_elementwise_abs((vector4float){-1.123, 2.123, -3.123, 4.123}), ((vector4float){1.123, 2.123, 3.123, 4.123}))
+CHECK_FOUR_FLOAT_VEC(__builtin_elementwise_abs((vector4double){-1.123, 2.123, -3.123, 4.123}), ((vector4double){1.123, 2.123, 3.123, 4.123}))
+static_assert(__builtin_fabs(__builtin_elementwise_abs((float)-1.123) - (float)1.123) < 1e-6); // Make sure the scalar float case also works.
+#undef CHECK_FOUR_FLOAT_VEC
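
For reference, a minimal usage sketch of the now-constexpr builtin (the helper
name is hypothetical; assumes a Clang build with this patch and GNU vector
extensions):

  typedef int v4si __attribute__((vector_size(16)));

  // Sum of absolute values of a 4-element int vector, usable in constant
  // expressions now that __builtin_elementwise_abs folds at compile time.
  constexpr int manhattan(v4si v) {
    v4si a = __builtin_elementwise_abs(v);
    return a[0] + a[1] + a[2] + a[3];
  }

  static_assert(manhattan((v4si){-1, 2, -3, 4}) == 10);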


