[llvm-branch-commits] [clang] [clang-tools-extra] [libc] [libclc] [libcxx] [lldb] [llvm] [CIR] Split CIR_UnaryOp into individual operations (PR #185179)

Henrich Lauko via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Sun Mar 8 06:07:51 PDT 2026


Andrzej =?utf-8?q?Warzyński?= <andrzej.warzynski at arm.com>,Amr Hesham
 <amr96 at programmer.net>,Ayush Kumar Gaur
 <132849148+Ayush3941 at users.noreply.github.com>,Amr Hesham
 <amr96 at programmer.net>,Alexis Engelke <engelke at in.tum.de>,Aditya Singh
 <aditya26189 at kgpian.iitkgp.ac.in>,Jakub Kuderski <jakub at nod-labs.com>,Florian
 Hahn <flo at fhahn.com>,Yingwei Zheng <dtcxzyw2333 at gmail.com>,Alexey Bataev
 <a.bataev at outlook.com>,Matt Arsenault <Matthew.Arsenault at amd.com>,Simon
 Pilgrim <llvm-dev at redking.me.uk>,Alexey Bataev <a.bataev at outlook.com>,Keith
 Smiley <keithbsmiley at gmail.com>,Keith Smiley <keithbsmiley at gmail.com>,Saleem
 Abdulrasool <compnerd at compnerd.org>,Nerixyz <nerixdev at outlook.de>,Aiden
 Grossman <aidengrossman at google.com>,owenca <owenpiano at gmail.com>,Louis
 Dionne <ldionne.2 at gmail.com>,Matt Arsenault <Matthew.Arsenault at amd.com>,William
 Tran-Viet <wtranviet at proton.me>,Lang Hames <lhames at gmail.com>,Joseph Huber
 <huberjn at outlook.com>,Lucas Chollet <lucas.chollet at free.fr>,Timm Baeder
 <tbaeder at redhat.com>,Alexey Samsonov <vonosmas at gmail.com>,Younan Zhang
 <zyn7109 at gmail.com>,Matt Arsenault <Matthew.Arsenault at amd.com>,xlauko
 <xlauko at mail.muni.cz>,xlauko <xlauko at mail.muni.cz>,xlauko
 <xlauko at mail.muni.cz>
Message-ID:
In-Reply-To: <llvm.org/llvm/llvm-project/pull/185179 at github.com>


https://github.com/xlauko updated https://github.com/llvm/llvm-project/pull/185179

>From 66e85c275fb587df9ff1ff1d3c26664c32ed5d49 Mon Sep 17 00:00:00 2001
From: Michael Buch <michaelbuch12 at gmail.com>
Date: Sat, 7 Mar 2026 11:27:30 +0000
Subject: [PATCH 01/33] [lldb][PlatformDarwin][test] Add unit-test for
 LocateExecutableScriptingResourcesFromDSYM (#185057)

Depends on:
* https://github.com/llvm/llvm-project/pull/185056

Adds a unit-test for
`PlatformDarwin::LocateExecutableScriptingResourcesFromDSYM`. I had to
mock the `ScriptInterpreter` because the function internally uses the
`ScriptInterpreterPython` to check for reserved words in file names. But
linking the `ScriptInterpreterPython` was quite the undertaking, which I
think we could pull off, but required more churn than I had hoped.
Mocking it seemed pretty low-cost so I resorted to doing that instead.

Test-cases aren't very elaborate yet. It just has a basic assertion that
we locate the Python file within the dSYM. Mostly I set up
infrastructure for future test-cases.
---
 lldb/unittests/Platform/CMakeLists.txt        |   1 +
 .../unittests/Platform/PlatformDarwinTest.cpp | 138 +++++++++++++++++-
 2 files changed, 138 insertions(+), 1 deletion(-)

diff --git a/lldb/unittests/Platform/CMakeLists.txt b/lldb/unittests/Platform/CMakeLists.txt
index 9a4fd8b3cd0c6..a96636fc3fd59 100644
--- a/lldb/unittests/Platform/CMakeLists.txt
+++ b/lldb/unittests/Platform/CMakeLists.txt
@@ -12,6 +12,7 @@ add_lldb_unittest(LLDBPlatformTests
     lldbPluginPlatformLinux
     lldbPluginPlatformMacOSX
     lldbPluginPlatformNetBSD
+    lldbUtilityHelpers
   )
 
 add_subdirectory(Android)
diff --git a/lldb/unittests/Platform/PlatformDarwinTest.cpp b/lldb/unittests/Platform/PlatformDarwinTest.cpp
index 72494d0a2667b..5947ea6428f54 100644
--- a/lldb/unittests/Platform/PlatformDarwinTest.cpp
+++ b/lldb/unittests/Platform/PlatformDarwinTest.cpp
@@ -9,15 +9,94 @@
 #include "gtest/gtest.h"
 
 #include "Plugins/Platform/MacOSX/PlatformDarwin.h"
+#include "Plugins/Platform/MacOSX/PlatformMacOSX.h"
+#include "Plugins/Platform/MacOSX/PlatformRemoteMacOSX.h"
+#include "TestingSupport/SubsystemRAII.h"
+#include "TestingSupport/TestUtilities.h"
+#include "lldb/Core/Debugger.h"
+#include "lldb/Core/PluginManager.h"
+#include "lldb/Host/HostInfo.h"
+#include "lldb/Interpreter/ScriptInterpreter.h"
 
 #include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FileSystem.h"
 
+#include <memory>
 #include <tuple>
 
 using namespace lldb;
 using namespace lldb_private;
 
-TEST(PlatformDarwinTest, TestParseVersionBuildDir) {
+namespace {
+class MockScriptInterpreterPython : public ScriptInterpreter {
+public:
+  MockScriptInterpreterPython(Debugger &debugger)
+      : ScriptInterpreter(debugger,
+                          lldb::ScriptLanguage::eScriptLanguagePython) {}
+
+  ~MockScriptInterpreterPython() override = default;
+
+  bool ExecuteOneLine(llvm::StringRef command, CommandReturnObject *,
+                      const ExecuteScriptOptions &) override {
+    return false;
+  }
+
+  void ExecuteInterpreterLoop() override {}
+
+  static void Initialize() {
+    PluginManager::RegisterPlugin(GetPluginNameStatic(),
+                                  GetPluginDescriptionStatic(),
+                                  lldb::eScriptLanguagePython, CreateInstance);
+  }
+
+  static void Terminate() {}
+
+  static lldb::ScriptInterpreterSP CreateInstance(Debugger &debugger) {
+    return std::make_shared<MockScriptInterpreterPython>(debugger);
+  }
+
+  static llvm::StringRef GetPluginNameStatic() {
+    return "MockScriptInterpreterPython";
+  }
+
+  static llvm::StringRef GetPluginDescriptionStatic() {
+    return "MockScriptInterpreterPython";
+  }
+
+  // PluginInterface protocol
+  llvm::StringRef GetPluginName() override { return GetPluginNameStatic(); }
+};
+
+LLDB_PLUGIN_DEFINE(MockScriptInterpreterPython)
+} // namespace
+
+struct PlatformDarwinTest : public testing::Test {
+protected:
+  void SetUp() override {
+    std::call_once(TestUtilities::g_debugger_initialize_flag,
+                   []() { Debugger::Initialize(nullptr); });
+  };
+
+  DebuggerSP m_debugger_sp;
+  PlatformSP m_platform_sp;
+  SubsystemRAII<FileSystem, HostInfo, PlatformMacOSX,
+                MockScriptInterpreterPython>
+      subsystems;
+};
+
+static std::string CreateFile(llvm::StringRef filename,
+                              llvm::SmallString<128> parent_dir) {
+  llvm::SmallString<128> path(parent_dir);
+  llvm::sys::path::append(path, filename);
+  int fd;
+  std::error_code ret = llvm::sys::fs::openFileForWrite(path, fd);
+  assert(!ret && "Failed to create test file.");
+  ::close(fd);
+
+  return path.c_str();
+}
+
+TEST_F(PlatformDarwinTest, TestParseVersionBuildDir) {
   llvm::VersionTuple V;
   llvm::StringRef D;
 
@@ -44,3 +123,60 @@ TEST(PlatformDarwinTest, TestParseVersionBuildDir) {
   std::tie(V, D) = PlatformDarwin::ParseVersionBuildDir("3.4.5");
   EXPECT_EQ(llvm::VersionTuple(3, 4, 5), V);
 }
+
+TEST_F(PlatformDarwinTest, LocateExecutableScriptingResourcesFromDSYM) {
+  TargetSP target_sp;
+  ArchSpec arch("x86_64-apple-macosx-");
+  auto platform_sp = PlatformRemoteMacOSX::CreateInstance(true, &arch);
+  Platform::SetHostPlatform(platform_sp);
+
+  m_debugger_sp = Debugger::CreateInstance();
+
+  m_debugger_sp->GetTargetList().CreateTarget(*m_debugger_sp, "", arch,
+                                              lldb_private::eLoadDependentsNo,
+                                              m_platform_sp, target_sp);
+
+  ASSERT_TRUE(target_sp);
+
+  llvm::SmallString<128> tmp_dir;
+  ASSERT_FALSE(llvm::sys::fs::createUniqueDirectory(
+      "locate-scripts-from-dsym-test", tmp_dir))
+      << "Failed to create test directory.";
+
+  // Create dummy module file at <test-root>/TestModule.o
+  FileSpec module_fspec(CreateFile("TestModule.o", tmp_dir));
+  ASSERT_TRUE(module_fspec);
+
+  // Create <test-root>/.dSYM/Contents/Resources
+  llvm::SmallString<128> dsym_resource_dir(tmp_dir);
+  llvm::sys::path::append(tmp_dir, ".dSYM", "Contents", "Resources");
+  ASSERT_FALSE(llvm::sys::fs::create_directory(dsym_resource_dir))
+      << "Failed to create test dSYM root directory.";
+
+  // Create .dSYM/Contents/Resources/DWARF
+  llvm::SmallString<128> dwarf_dir(dsym_resource_dir);
+  llvm::sys::path::append(dwarf_dir, "DWARF");
+  ASSERT_FALSE(llvm::sys::fs::create_directory(dwarf_dir))
+      << "Failed to create test dSYM DWARF directory.";
+
+  // Create dummy module file at
+  // <test-root>/.dSYM/Contents/Resources/DWARF/TestModule.o
+  FileSpec dsym_module_fpec(CreateFile("TestModule.o", dwarf_dir));
+  ASSERT_TRUE(dsym_module_fpec);
+
+  // Create .dSYM/Contents/Resources/Python
+  llvm::SmallString<128> python_dir(dsym_resource_dir);
+  llvm::sys::path::append(python_dir, "Python");
+  ASSERT_FALSE(llvm::sys::fs::create_directory(python_dir))
+      << "Failed to create test dSYM Python directory.";
+
+  CreateFile("TestModule.py", python_dir);
+  CreateFile("TestModule.txt", python_dir);
+  CreateFile("TestModule.sh", python_dir);
+
+  StreamString ss;
+  FileSpecList fspecs = std::static_pointer_cast<PlatformDarwin>(platform_sp)
+                            ->LocateExecutableScriptingResourcesFromDSYM(
+                                ss, module_fspec, *target_sp, dsym_module_fpec);
+  EXPECT_EQ(fspecs.GetSize(), 1u);
+}

>From b8ea766938b4b2812f9b833a883ad02c92852ba6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andrzej=20Warzy=C5=84ski?= <andrzej.warzynski at arm.com>
Date: Sat, 7 Mar 2026 11:56:06 +0000
Subject: [PATCH 02/33] [Clang][AArch64] Reorganize tests for `vceqz`
 intrinsics (NFC) (#185090)

Group related `vceqz_*` and `vceqzd_*` tests together for consistency
and readability. Add a comment documenting the scalar variants that are
not currently covered.

No functional change.

Follow-up to #184893.
---
 clang/test/CodeGen/AArch64/neon/fullfp16.c   |  11 +-
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 108 ++++++++++---------
 2 files changed, 67 insertions(+), 52 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/fullfp16.c b/clang/test/CodeGen/AArch64/neon/fullfp16.c
index 77b6c09de857d..3cac22ee5ad54 100644
--- a/clang/test/CodeGen/AArch64/neon/fullfp16.c
+++ b/clang/test/CodeGen/AArch64/neon/fullfp16.c
@@ -9,6 +9,13 @@
 //
 // Tests for unconstrained intrinsics that require the fullfp16 extension.
 //
+// This file contains tests that were originally located in
+//  *  clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
+// The main difference is the use of RUN lines that enable ClangIR lowering;
+// therefore only builtins currently supported by ClangIR are tested here.
+// Once ClangIR support is complete, this file is intended to replace the
+// original test file.
+//
 // These intrinsics expand to code containing multiple compound and declaration
 // statements rather than just plain function calls, which leads to:
 //  * "scopes" at the CIR level, and then
@@ -19,10 +26,6 @@
 //
 // TODO: Remove `-simplifycfg` once CIR lowering includes the relevant
 //       optimizations to reduce the CFG.
-//
-// TODO: Merge this file with
-//        * clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c
-//       (the source of these tests).
 //=============================================================================
 
 #include <arm_fp16.h>
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index a711245b33723..47d2a58afb550 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -7,11 +7,19 @@
 //=============================================================================
 // NOTES
 //
+// This file contains tests that were originally located in
+//  *  clang/test/CodeGen/AArch64/neon-intrinsics.c.
+// The main difference is the use of RUN lines that enable ClangIR lowering;
+// therefore only builtins currently supported by ClangIR are tested here.
+// Once ClangIR support is complete, this file is intended to replace the
+// original test file.
+//
 // ACLE section headings based on v2025Q2 of the ACLE specification:
 //  * https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#bitwise-equal-to-zero
 //
 // Different labels for CIR stem from an additional function call that is
 // present at the AST and CIR levels, but is inlined at the LLVM IR level.
+//
 //=============================================================================
 
 #include <arm_neon.h>
@@ -30,21 +38,6 @@ int64_t test_vnegd_s64(int64_t a) {
 //===------------------------------------------------------===//
 // 2.1.2.2 Bitwise equal to zero
 //===------------------------------------------------------===//
-// LLVM-LABEL: @test_vceqzd_s64
-// CIR-LABEL: @vceqzd_s64
-uint64_t test_vceqzd_s64(int64_t a) {
-// CIR:   [[C_0:%.*]] = cir.const #cir.int<0>
-// CIR:   [[CMP:%.*]] = cir.cmp(eq, %{{.*}}, [[C_0]]) : !s64i, !cir.bool
-// CIR:   [[RES:%.*]] = cir.cast bool_to_int [[CMP]] : !cir.bool -> !cir.int<s, 1>
-// CIR:   cir.cast integral [[RES]] : !cir.int<s, 1> -> !u64i
-
-// LLVM-SAME: i64{{.*}} [[A:%.*]])
-// LLVM:          [[TMP0:%.*]] = icmp eq i64 [[A]], 0
-// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64
-// LLVM-NEXT:    ret i64 [[VCEQZ_I]]
-  return (uint64_t)vceqzd_s64(a);
-}
-
 // LLVM-LABEL: @test_vceqz_s8(
 // CIR-LABEL: @vceqz_s8(
 uint8x8_t test_vceqz_s8(int8x8_t a) {
@@ -106,22 +99,6 @@ uint64x1_t test_vceqz_s64(int64x1_t a) {
   return vceqz_s64(a);
 }
 
-// LLVM-LABEL: @test_vceqz_u64(
-// CIR-LABEL: @vceqz_u64(
-uint64x1_t test_vceqz_u64(uint64x1_t a) {
-// CIR:   cir.cast bitcast {{%.*}} : !cir.vector<8 x !s8i> -> !cir.vector<1 x !u64i>
-// CIR:   [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<1 x !u64i>
-// CIR:   cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<1 x !u64i>, !cir.vector<1 x !s64i>
-
-// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) #[[ATTR0]] {
-// LLVM:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// LLVM-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// LLVM-NEXT:    [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer
-// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
-// LLVM-NEXT:    ret <1 x i64> [[VCEQZ_I]]
-  return vceqz_u64(a);
-}
-
 // LLVM-LABEL: @test_vceqz_p64(
 // CIR-LABEL: @vceqz_p64(
 uint64x1_t test_vceqz_p64(poly64x1_t a) {
@@ -232,6 +209,22 @@ uint32x2_t test_vceqz_u32(uint32x2_t a) {
   return vceqz_u32(a);
 }
 
+// LLVM-LABEL: @test_vceqz_u64(
+// CIR-LABEL: @vceqz_u64(
+uint64x1_t test_vceqz_u64(uint64x1_t a) {
+// CIR:   cir.cast bitcast {{%.*}} : !cir.vector<8 x !s8i> -> !cir.vector<1 x !u64i>
+// CIR:   [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<1 x !u64i>
+// CIR:   cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<1 x !u64i>, !cir.vector<1 x !s64i>
+
+// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) #[[ATTR0]] {
+// LLVM:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// LLVM-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// LLVM-NEXT:    [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer
+// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64>
+// LLVM-NEXT:    ret <1 x i64> [[VCEQZ_I]]
+  return vceqz_u64(a);
+}
+
 // LLVM-LABEL: @test_vceqzq_u8(
 // CIR-LABEL: @vceqzq_u8(
 uint8x16_t test_vceqzq_u8(uint8x16_t a) {
@@ -345,6 +338,23 @@ uint32x4_t test_vceqzq_f32(float32x4_t a) {
   return vceqzq_f32(a);
 }
 
+// LLVM-LABEL: @test_vceqzq_f64(
+// CIR-LABEL: @vceqzq_f64(
+uint64x2_t test_vceqzq_f64(float64x2_t a) {
+// CIR:   cir.cast bitcast {{%.*}} : !cir.vector<16 x !s8i> -> !cir.vector<2 x !cir.double>
+// CIR:   [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<2 x !cir.double>
+// CIR:   cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i>
+
+// LLVM-SAME: <2 x double> {{.*}} [[A:%.*]]) #[[ATTR0]] {
+// LLVM:    [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
+// LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+// LLVM-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+// LLVM-NEXT:    [[TMP3:%.*]] = fcmp oeq <2 x double> [[TMP2]], zeroinitializer
+// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
+// LLVM-NEXT:    ret <2 x i64> [[VCEQZ_I]]
+  return vceqzq_f64(a);
+}
+
 // LLVM-LABEL: @test_vceqz_p8(
 // CIR-LABEL: @vceqz_p8(
 uint8x8_t test_vceqz_p8(poly8x8_t a) {
@@ -371,23 +381,6 @@ uint8x16_t test_vceqzq_p8(poly8x16_t a) {
   return vceqzq_p8(a);
 }
 
-// LLVM-LABEL: @test_vceqzq_f64(
-// CIR-LABEL: @vceqzq_f64(
-uint64x2_t test_vceqzq_f64(float64x2_t a) {
-// CIR:   cir.cast bitcast {{%.*}} : !cir.vector<16 x !s8i> -> !cir.vector<2 x !cir.double>
-// CIR:   [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<2 x !cir.double>
-// CIR:   cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i>
-
-// LLVM-SAME: <2 x double> {{.*}} [[A:%.*]]) #[[ATTR0]] {
-// LLVM:    [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
-// LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
-// LLVM-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
-// LLVM-NEXT:    [[TMP3:%.*]] = fcmp oeq <2 x double> [[TMP2]], zeroinitializer
-// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
-// LLVM-NEXT:    ret <2 x i64> [[VCEQZ_I]]
-  return vceqzq_f64(a);
-}
-
 // LLVM-LABEL: @test_vceqzq_p64(
 // CIR-LABEL: @vceqzq_p64(
 uint64x2_t test_vceqzq_p64(poly64x2_t a) {
@@ -404,6 +397,25 @@ uint64x2_t test_vceqzq_p64(poly64x2_t a) {
   return vceqzq_p64(a);
 }
 
+// LLVM-LABEL: @test_vceqzd_s64
+// CIR-LABEL: @vceqzd_s64
+uint64_t test_vceqzd_s64(int64_t a) {
+// CIR:   [[C_0:%.*]] = cir.const #cir.int<0>
+// CIR:   [[CMP:%.*]] = cir.cmp(eq, %{{.*}}, [[C_0]]) : !s64i, !cir.bool
+// CIR:   [[RES:%.*]] = cir.cast bool_to_int [[CMP]] : !cir.bool -> !cir.int<s, 1>
+// CIR:   cir.cast integral [[RES]] : !cir.int<s, 1> -> !u64i
+
+// LLVM-SAME: i64{{.*}} [[A:%.*]])
+// LLVM:          [[TMP0:%.*]] = icmp eq i64 [[A]], 0
+// LLVM-NEXT:    [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64
+// LLVM-NEXT:    ret i64 [[VCEQZ_I]]
+  return (uint64_t)vceqzd_s64(a);
+}
+
+// TODO SISD variants:
+// vceqzd_u64, vceqzs_f32, vceqzd_f64
+
+
 //===------------------------------------------------------===//
 // 2.1.1.6.1. Absolute difference
 //===------------------------------------------------------===//

>From 9088858f548da10aae85700ffee6b71e8221ec5e Mon Sep 17 00:00:00 2001
From: Amr Hesham <amr96 at programmer.net>
Date: Sat, 7 Mar 2026 12:56:44 +0100
Subject: [PATCH 03/33] [CIR][NFC] Merge try-catch-tmp with try-catch test file
 (#185123)

Merge try-catch-tmp.cpp with the try-catch test file
---
 clang/test/CIR/CodeGen/try-catch-tmp.cpp | 810 -----------------------
 clang/test/CIR/CodeGen/try-catch.cpp     | 802 ++++++++++++++++++++++
 2 files changed, 802 insertions(+), 810 deletions(-)
 delete mode 100644 clang/test/CIR/CodeGen/try-catch-tmp.cpp

diff --git a/clang/test/CIR/CodeGen/try-catch-tmp.cpp b/clang/test/CIR/CodeGen/try-catch-tmp.cpp
deleted file mode 100644
index 17542cded2d7d..0000000000000
--- a/clang/test/CIR/CodeGen/try-catch-tmp.cpp
+++ /dev/null
@@ -1,810 +0,0 @@
-// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -fclangir -emit-cir %s -o %t.cir
-// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
-// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -fclangir -emit-llvm %s -o %t-cir.ll
-// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
-// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll
-// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
-
-// TODO(cir): Merge this with try-catch.cpp.
-
-int division();
-
-void call_function_inside_try_catch_all() {
-  try {
-    division();
-  } catch (...) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z34call_function_inside_try_catch_allv() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   cir.try {
-// CIR:       %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:       cir.yield
-// CIR:   } catch all (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
-// CIR:       %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
-// CIR:       cir.cleanup.scope {
-// CIR:         cir.yield
-// CIR:       } cleanup {{.*}} {
-// CIR:         cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:         cir.yield
-// CIR:       }
-// CIR:       cir.yield
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z34call_function_inside_try_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr null
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[BEGIN_CATCH:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI1]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z34call_function_inside_try_catch_allv() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXN_OBJ_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_SELECTOR_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// OGCG: [[INVOKE_CONT]]:
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[LANDING_PAD]]:
-// OGCG:   %[[LP:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr null
-// OGCG:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// OGCG:   store ptr %[[EXN_OBJ]], ptr %[[EXN_OBJ_ADDR]], align 8
-// OGCG:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// OGCG:   store i32 %[[EH_SELECTOR_VAL]], ptr %[[EH_SELECTOR_ADDR]], align 4
-// OGCG:   br label %[[CATCH:.*]]
-// OGCG: [[CATCH]]:
-// OGCG:   %[[EXN_OBJ:.*]] = load ptr, ptr %[[EXN_OBJ_ADDR]], align 8
-// OGCG:   %[[CATCH_BEGIN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ]])
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-
-void call_function_inside_try_catch_with_exception_type() {
-  try {
-    division();
-  } catch (int e) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z50call_function_inside_try_catch_with_exception_typev() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e"]
-// CIR:   cir.try {
-// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:     cir.yield
-// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
-// CIR:     cir.cleanup.scope {
-// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!s32i>, !s32i
-// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR:       cir.yield
-// CIR:     } cleanup {{.*}} {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
-// CIR:     cir.resume %{{.*}} : !cir.eh_token
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z50call_function_inside_try_catch_with_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr @_ZTIi
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[DISPATCH:.*]]
-// LLVM: [[DISPATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
-// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   %[[LOAD:.*]] = load i32, ptr %[[TOKEN]], align 4
-// LLVM:   store i32 %[[LOAD]], ptr {{.*}}, align 4
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[RESUME]]:
-// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
-// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
-// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z50call_function_inside_try_catch_with_exception_typev() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[E_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
-// OGCG: [[INVOKE_NORMAL]]:
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[INVOKE_UNWIND]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr @_ZTIi
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   br label %[[CATCH_DISPATCH:.*]]
-// OGCG: [[CATCH_DISPATCH]]:
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
-// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
-// OGCG: [[CATCH_EXCEPTION]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   %[[TMP_BEGIN_CATCH:.*]] = load i32, ptr %[[BEGIN_CATCH]], align 4
-// OGCG:   store i32 %[[TMP_BEGIN_CATCH]], ptr %[[E_ADDR]], align 4
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-// OGCG: [[EH_RESUME]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
-// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
-// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-
-void call_function_inside_try_catch_with_ref_exception_type() {
-  try {
-    division();
-  } catch (int &ref) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z54call_function_inside_try_catch_with_ref_exception_typev() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["ref", const]
-// CIR:   cir.try {
-// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:     cir.yield
-// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%{{.*}}: !cir.eh_token {{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %{{.*}} : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
-// CIR:     cir.cleanup.scope {
-// CIR:       cir.store {{.*}} %[[EXN_PTR]], %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
-// CIR:       cir.yield
-// CIR:     } cleanup all {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } unwind (%{{.*}}: !cir.eh_token {{.*}}) {
-// CIR:     cir.resume %{{.*}} : !cir.eh_token
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z54call_function_inside_try_catch_with_ref_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT:.*]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr @_ZTIi
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[DISPATCH:.*]]
-// LLVM: [[DISPATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
-// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   store ptr %[[TOKEN]], ptr %{{.*}}, align 8
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[RESUME]]:
-// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
-// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
-// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z54call_function_inside_try_catch_with_ref_exception_typev() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[E_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
-// OGCG: [[INVOKE_NORMAL]]:
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[INVOKE_UNWIND]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr @_ZTIi
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %1, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   br label %[[CATCH_DISPATCH:.*]]
-// OGCG: [[CATCH_DISPATCH]]:
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
-// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
-// OGCG: [[CATCH_EXCEPTION]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   store ptr %[[BEGIN_CATCH]], ptr %[[E_ADDR]], align 8
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-// OGCG: [[EH_RESUME]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
-// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
-// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-
-void call_function_inside_try_catch_with_complex_exception_type() {
-  try {
-    division();
-  } catch (int _Complex e) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z58call_function_inside_try_catch_with_complex_exception_typev() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["e"]
-// CIR:   cir.try {
-// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:     cir.yield
-// CIR:   } catch [type #cir.global_view<@_ZTICi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!cir.complex<!s32i>>)
-// CIR:     cir.cleanup.scope {
-// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
-// CIR:       cir.yield
-// CIR:     } cleanup {{.*}} {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
-// CIR:     cir.resume %{{.*}} : !cir.eh_token
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z58call_function_inside_try_catch_with_complex_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr @_ZTICi
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[DISPATCH:.*]]
-// LLVM: [[DISPATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTICi)
-// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
-// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   %[[LOAD:.*]] = load { i32, i32 }, ptr %[[TOKEN]], align 4
-// LLVM:   store { i32, i32 } %[[LOAD]], ptr {{.*}}, align 4
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[RESUME]]:
-// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
-// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
-// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z58call_function_inside_try_catch_with_complex_exception_typev() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[E_ADDR:.*]] = alloca { i32, i32 }, align 4
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
-// OGCG: [[INVOKE_NORMAL]]:
-// OGCG:    br label %[[TRY_CONT:.*]]
-// OGCG: [[INVOKE_UNWIND]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr @_ZTICi
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   br label %[[CATCH_DISPATCH:.*]]
-// OGCG: [[CATCH_DISPATCH]]:
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTICi)
-// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
-// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
-// OGCG: [[CATCH_EXCEPTION]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   %[[EXCEPTION_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[BEGIN_CATCH]], i32 0, i32 0
-// OGCG:   %[[EXCEPTION_REAL:.*]] = load i32, ptr %[[EXCEPTION_REAL_PTR]], align 4
-// OGCG:   %[[EXCEPTION_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[BEGIN_CATCH]], i32 0, i32 1
-// OGCG:   %[[EXCEPTION_IMAG:.*]] = load i32, ptr %[[EXCEPTION_IMAG_PTR]], align 4
-// OGCG:   %[[E_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[E_ADDR]], i32 0, i32 0
-// OGCG:   %[[E_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[E_ADDR]], i32 0, i32 1
-// OGCG:   store i32 %[[EXCEPTION_REAL]], ptr %[[E_REAL_PTR]], align 4
-// OGCG:   store i32 %[[EXCEPTION_IMAG]], ptr %[[E_IMAG_PTR]], align 4
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-// OGCG: [[EH_RESUME]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
-// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
-// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-
-void call_function_inside_try_catch_with_array_exception_type() {
-  try {
-    division();
-  } catch (int e[]) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z56call_function_inside_try_catch_with_array_exception_typev() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   %[[E_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["e"]
-// CIR:   cir.try {
-// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:     cir.yield
-// CIR:   } catch [type #cir.global_view<@_ZTIPi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
-// CIR:     cir.cleanup.scope {
-// CIR:       cir.store {{.*}} %[[EXN_PTR]], %[[E_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
-// CIR:       cir.yield
-// CIR:     } cleanup {{.*}} {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
-// CIR:     cir.resume %{{.*}} : !cir.eh_token
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z56call_function_inside_try_catch_with_array_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr @_ZTIPi
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[DISPATCH:.*]]
-// LLVM: [[DISPATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIPi)
-// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
-// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   store ptr %[[TOKEN]], ptr {{.*}}, align 8
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[RESUME]]:
-// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
-// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
-// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z56call_function_inside_try_catch_with_array_exception_typev() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[E_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:          to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
-// OGCG: [[INVOKE_NORMAL]]:
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[INVOKE_UNWIND]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr @_ZTIPi
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   br label %[[CATCH_DISPATCH:.*]]
-// OGCG: [[CATCH_DISPATCH]]:
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %ehselector.slot, align 4
-// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIPi)
-// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
-// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
-// OGCG: [[CATCH_EXCEPTION]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   store ptr %[[BEGIN_CATCH]], ptr %[[E_ADDR]], align 8
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-// OGCG: [[EH_RESUME]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
-// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
-// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
-
-void call_function_inside_try_catch_with_exception_type_and_catch_all() {
-  try {
-    division();
-  } catch (int e) {
-  } catch (...) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() personality(@__gxx_personality_v0)
-// CIR: cir.scope {
-// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e"]
-// CIR:   cir.try {
-// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
-// CIR:     cir.yield
-// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
-// CIR:     cir.cleanup.scope {
-// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!s32i>, !s32i
-// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
-// CIR:       cir.yield
-// CIR:     } cleanup {{.*}} {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } catch all (%[[EH_TOKEN2:.*]]: !cir.eh_token{{.*}}) {
-// CIR:     %[[CATCH_TOKEN2:.*]], %{{.*}} = cir.begin_catch %[[EH_TOKEN2]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
-// CIR:     cir.cleanup.scope {
-// CIR:       cir.yield
-// CIR:     } cleanup {{.*}} {
-// CIR:       cir.end_catch %[[CATCH_TOKEN2]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           catch ptr @_ZTIi
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   br label %[[DISPATCH:.*]]
-// LLVM: [[DISPATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
-// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[CATCH_ALL:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   %[[LOAD:.*]] = load i32, ptr %[[TOKEN]], align 4
-// LLVM:   store i32 %[[LOAD]], ptr {{.*}}, align 4
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[CATCH_ALL]]:
-// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
-// LLVM:   %[[TOKEN2:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI3]])
-// LLVM:   br label %[[CATCH_ALL_BODY:.*]]
-// LLVM: [[CATCH_ALL_BODY]]:
-// LLVM:   br label %[[END_CATCH2:.*]]
-// LLVM: [[END_CATCH2]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH2:.*]]
-// LLVM: [[END_DISPATCH2]]:
-// LLVM:   br label %[[END_TRY2:.*]]
-// LLVM: [[END_TRY2]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() #0 personality ptr @__gxx_personality_v0
-// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
-// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[E_ADDR:.*]] = alloca i32, align 4
-// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
-// OGCG: [[INVOKE_NORMAL]]:
-// OGCG:   br label %try.cont
-// OGCG: [[INVOKE_UNWIND]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr @_ZTIi
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   br label %[[CATCH_DISPATCH:.*]]
-// OGCG: [[CATCH_DISPATCH]]:
-// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
-// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
-// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
-// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[CATCH_ALL:.*]]
-// OGCG: [[CATCH_EXCEPTION]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   %[[TMP_BEGIN_CATCH:.*]] = load i32, ptr %[[BEGIN_CATCH]], align 4
-// OGCG:   store i32 %[[TMP_BEGIN_CATCH]], ptr %[[E_ADDR]], align 4
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[TRY_CONT]]:
-// OGCG:   ret void
-// OGCG: [[CATCH_ALL]]:
-// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
-
-struct S {
-  ~S();
-};
-
-void cleanup_inside_try_body() {
-  try {
-    S s;
-    division();
-  } catch (...) {
-  }
-}
-
-// CIR: cir.func {{.*}} @_Z23cleanup_inside_try_bodyv(){{.*}} personality(@__gxx_personality_v0) {
-// CIR: cir.scope {
-// CIR:   %[[S:.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"]
-// CIR:   cir.try {
-// CIR:     cir.cleanup.scope {
-// CIR:       cir.call @_Z8divisionv()
-// CIR:       cir.yield
-// CIR:     } cleanup  all {
-// CIR:       cir.call @_ZN1SD1Ev(%[[S]])
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   } catch all (%[[TOKEN:.*]]: !cir.eh_token {{.*}}) {
-// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
-// CIR:     cir.cleanup.scope {
-// CIR:       cir.yield
-// CIR:     } cleanup  all {
-// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
-// CIR:       cir.yield
-// CIR:     }
-// CIR:     cir.yield
-// CIR:   }
-// CIR: }
-
-// LLVM: define {{.*}} void @_Z23cleanup_inside_try_bodyv() {{.*}} personality ptr @__gxx_personality_v0
-// LLVM:   br label %[[TRY_SCOPE:.*]]
-// LLVM: [[TRY_SCOPE]]:
-// LLVM:   br label %[[TRY_BEGIN:.*]]
-// LLVM: [[TRY_BEGIN]]:
-// LLVM:   br label %[[CLEANUP_SCOPE:.*]]
-// LLVM: [[CLEANUP_SCOPE]]:
-// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
-// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// LLVM: [[INVOKE_CONT]]:
-// LLVM:   br label %[[CLEANUP:.*]]
-// LLVM: [[CLEANUP]]:
-// LLVM:   call void @_ZN1SD1Ev(ptr {{.*}})
-// LLVM:   br label %[[END_CLEANUP:.*]]
-// LLVM: [[END_CLEANUP]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[LANDING_PAD]]:
-// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
-// LLVM:           cleanup
-// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
-// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
-// LLVM:   br label %[[CLEANUP_LANDING:.*]]
-// LLVM: [[CLEANUP_LANDING]]:
-// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
-// LLVM:   call void @_ZN1SD1Ev(ptr {{.*}})
-// LLVM:   br label %[[CATCH:.*]]
-// LLVM: [[CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CLEANUP_LANDING:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CLEANUP_LANDING:.*]] ]
-// LLVM:   br label %[[BEGIN_CATCH:.*]]
-// LLVM: [[BEGIN_CATCH]]:
-// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[CATCH:.*]] ]
-// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
-// LLVM:   br label %[[CATCH_BODY:.*]]
-// LLVM: [[CATCH_BODY]]:
-// LLVM:   br label %[[END_CATCH:.*]]
-// LLVM: [[END_CATCH]]:
-// LLVM:   call void @__cxa_end_catch()
-// LLVM:   br label %[[END_DISPATCH:.*]]
-// LLVM: [[END_DISPATCH]]:
-// LLVM:   br label %[[END_TRY:.*]]
-// LLVM: [[END_TRY]]:
-// LLVM:   br label %[[TRY_CONT:.*]]
-// LLVM: [[TRY_CONT]]:
-// LLVM:   br label %[[DONE:.*]]
-// LLVM: [[DONE]]:
-// LLVM:   ret void
-
-// OGCG: define {{.*}} void @_Z23cleanup_inside_try_bodyv() {{.*}} personality ptr @__gxx_personality_v0 {
-// OGCG:   %[[S:.*]] = alloca %struct.S
-// OGCG:   %[[EXN_SLOT:.*]] = alloca ptr
-// OGCG:   %[[EHSELECTOR_SLOT:.*]] = alloca i32
-// OGCG:   %[[CALL:.*]] = invoke {{.*}} i32 @_Z8divisionv()
-// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
-// OGCG: [[INVOKE_CONT]]:
-// OGCG:   call void @_ZN1SD1Ev(ptr noundef nonnull align 1 dereferenceable(1) %[[S]])
-// OGCG:   br label %[[TRY_CONT:.*]]
-// OGCG: [[LANDING_PAD]]:
-// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
-// OGCG:           catch ptr null
-// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
-// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXN_SLOT]]
-// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
-// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EHSELECTOR_SLOT]]
-// OGCG:   call void @_ZN1SD1Ev(ptr noundef nonnull align 1 dereferenceable(1) %[[S]])
-// OGCG:   br label %[[CATCH:.*]]
-// OGCG: [[CATCH]]:
-// OGCG:   %[[EXCEPTION:.*]] = load ptr, ptr %[[EXN_SLOT]]
-// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXCEPTION]])
-// OGCG:   call void @__cxa_end_catch()
-// OGCG:   br label %[[TRY_CONT]]
diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp
index ece73addbbf24..a9ad38a56715d 100644
--- a/clang/test/CIR/CodeGen/try-catch.cpp
+++ b/clang/test/CIR/CodeGen/try-catch.cpp
@@ -194,3 +194,805 @@ void calling_noexcept_function_inside_try_block() {
 
 // OGCG: call void @_Z22function_with_noexceptv()
 // OGCG: ret void
+
+int division();
+
+void call_function_inside_try_catch_all() {
+  try {
+    division();
+  } catch (...) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z34call_function_inside_try_catch_allv() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   cir.try {
+// CIR:       %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:       cir.yield
+// CIR:   } catch all (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
+// CIR:       %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
+// CIR:       cir.cleanup.scope {
+// CIR:         cir.yield
+// CIR:       } cleanup {{.*}} {
+// CIR:         cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:         cir.yield
+// CIR:       }
+// CIR:       cir.yield
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z34call_function_inside_try_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr null
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[BEGIN_CATCH:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI1]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z34call_function_inside_try_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXN_OBJ_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_SELECTOR_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[LANDING_PAD]]:
+// OGCG:   %[[LP:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr null
+// OGCG:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// OGCG:   store ptr %[[EXN_OBJ]], ptr %[[EXN_OBJ_ADDR]], align 8
+// OGCG:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// OGCG:   store i32 %[[EH_SELECTOR_VAL]], ptr %[[EH_SELECTOR_ADDR]], align 4
+// OGCG:   br label %[[CATCH:.*]]
+// OGCG: [[CATCH]]:
+// OGCG:   %[[EXN_OBJ:.*]] = load ptr, ptr %[[EXN_OBJ_ADDR]], align 8
+// OGCG:   %[[CATCH_BEGIN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ]])
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+
+void call_function_inside_try_catch_with_exception_type() {
+  try {
+    division();
+  } catch (int e) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z50call_function_inside_try_catch_with_exception_typev() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e"]
+// CIR:   cir.try {
+// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:     cir.yield
+// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
+// CIR:     cir.cleanup.scope {
+// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR:       cir.yield
+// CIR:     } cleanup {{.*}} {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
+// CIR:     cir.resume %{{.*}} : !cir.eh_token
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z50call_function_inside_try_catch_with_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr @_ZTIi
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[DISPATCH:.*]]
+// LLVM: [[DISPATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
+// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   %[[LOAD:.*]] = load i32, ptr %[[TOKEN]], align 4
+// LLVM:   store i32 %[[LOAD]], ptr {{.*}}, align 4
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[RESUME]]:
+// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
+// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
+// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z50call_function_inside_try_catch_with_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[E_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
+// OGCG: [[INVOKE_NORMAL]]:
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[INVOKE_UNWIND]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr @_ZTIi
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   br label %[[CATCH_DISPATCH:.*]]
+// OGCG: [[CATCH_DISPATCH]]:
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
+// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
+// OGCG: [[CATCH_EXCEPTION]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   %[[TMP_BEGIN_CATCH:.*]] = load i32, ptr %[[BEGIN_CATCH]], align 4
+// OGCG:   store i32 %[[TMP_BEGIN_CATCH]], ptr %[[E_ADDR]], align 4
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
+// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
+// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+
+void call_function_inside_try_catch_with_ref_exception_type() {
+  try {
+    division();
+  } catch (int &ref) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z54call_function_inside_try_catch_with_ref_exception_typev() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["ref", const]
+// CIR:   cir.try {
+// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:     cir.yield
+// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%{{.*}}: !cir.eh_token {{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %{{.*}} : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
+// CIR:     cir.cleanup.scope {
+// CIR:       cir.store {{.*}} %[[EXN_PTR]], %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR:       cir.yield
+// CIR:     } cleanup all {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } unwind (%{{.*}}: !cir.eh_token {{.*}}) {
+// CIR:     cir.resume %{{.*}} : !cir.eh_token
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z54call_function_inside_try_catch_with_ref_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr @_ZTIi
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[DISPATCH:.*]]
+// LLVM: [[DISPATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
+// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   store ptr %[[TOKEN]], ptr %{{.*}}, align 8
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[RESUME]]:
+// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
+// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
+// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z54call_function_inside_try_catch_with_ref_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[E_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
+// OGCG: [[INVOKE_NORMAL]]:
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[INVOKE_UNWIND]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr @_ZTIi
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   br label %[[CATCH_DISPATCH:.*]]
+// OGCG: [[CATCH_DISPATCH]]:
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
+// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
+// OGCG: [[CATCH_EXCEPTION]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   store ptr %[[BEGIN_CATCH]], ptr %[[E_ADDR]], align 8
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
+// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
+// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+
+void call_function_inside_try_catch_with_complex_exception_type() {
+  try {
+    division();
+  } catch (int _Complex e) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z58call_function_inside_try_catch_with_complex_exception_typev() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["e"]
+// CIR:   cir.try {
+// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:     cir.yield
+// CIR:   } catch [type #cir.global_view<@_ZTICi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!cir.complex<!s32i>>)
+// CIR:     cir.cleanup.scope {
+// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+// CIR:       cir.yield
+// CIR:     } cleanup {{.*}} {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
+// CIR:     cir.resume %{{.*}} : !cir.eh_token
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z58call_function_inside_try_catch_with_complex_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr @_ZTICi
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[DISPATCH:.*]]
+// LLVM: [[DISPATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTICi)
+// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
+// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   %[[LOAD:.*]] = load { i32, i32 }, ptr %[[TOKEN]], align 4
+// LLVM:   store { i32, i32 } %[[LOAD]], ptr {{.*}}, align 4
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[RESUME]]:
+// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
+// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
+// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z58call_function_inside_try_catch_with_complex_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[E_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
+// OGCG: [[INVOKE_NORMAL]]:
+// OGCG:    br label %[[TRY_CONT:.*]]
+// OGCG: [[INVOKE_UNWIND]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr @_ZTICi
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   br label %[[CATCH_DISPATCH:.*]]
+// OGCG: [[CATCH_DISPATCH]]:
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTICi)
+// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
+// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
+// OGCG: [[CATCH_EXCEPTION]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   %[[EXCEPTION_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[BEGIN_CATCH]], i32 0, i32 0
+// OGCG:   %[[EXCEPTION_REAL:.*]] = load i32, ptr %[[EXCEPTION_REAL_PTR]], align 4
+// OGCG:   %[[EXCEPTION_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[BEGIN_CATCH]], i32 0, i32 1
+// OGCG:   %[[EXCEPTION_IMAG:.*]] = load i32, ptr %[[EXCEPTION_IMAG_PTR]], align 4
+// OGCG:   %[[E_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[E_ADDR]], i32 0, i32 0
+// OGCG:   %[[E_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[E_ADDR]], i32 0, i32 1
+// OGCG:   store i32 %[[EXCEPTION_REAL]], ptr %[[E_REAL_PTR]], align 4
+// OGCG:   store i32 %[[EXCEPTION_IMAG]], ptr %[[E_IMAG_PTR]], align 4
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
+// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
+// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+
+void call_function_inside_try_catch_with_array_exception_type() {
+  try {
+    division();
+  } catch (int e[]) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z56call_function_inside_try_catch_with_array_exception_typev() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   %[[E_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["e"]
+// CIR:   cir.try {
+// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:     cir.yield
+// CIR:   } catch [type #cir.global_view<@_ZTIPi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
+// CIR:     cir.cleanup.scope {
+// CIR:       cir.store {{.*}} %[[EXN_PTR]], %[[E_ADDR]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR:       cir.yield
+// CIR:     } cleanup {{.*}} {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } unwind (%{{.*}}: !cir.eh_token{{.*}}) {
+// CIR:     cir.resume %{{.*}} : !cir.eh_token
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z56call_function_inside_try_catch_with_array_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr @_ZTIPi
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[DISPATCH:.*]]
+// LLVM: [[DISPATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIPi)
+// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
+// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[RESUME:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   store ptr %[[TOKEN]], ptr {{.*}}, align 8
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[RESUME]]:
+// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[EXN_OBJ_PHI3]], 0
+// LLVM:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[EH_SELECTOR_PHI3]], 1
+// LLVM:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z56call_function_inside_try_catch_with_array_exception_typev() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[E_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:          to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
+// OGCG: [[INVOKE_NORMAL]]:
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[INVOKE_UNWIND]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr @_ZTIPi
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   br label %[[CATCH_DISPATCH:.*]]
+// OGCG: [[CATCH_DISPATCH]]:
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIPi)
+// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
+// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[EH_RESUME:.*]]
+// OGCG: [[CATCH_EXCEPTION]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   store ptr %[[BEGIN_CATCH]], ptr %[[E_ADDR]], align 8
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+// OGCG: [[EH_RESUME]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[TMP_EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } poison, ptr %[[TMP_EXCEPTION]], 0
+// OGCG:   %[[EXCEPTION_INFO:.*]] = insertvalue { ptr, i32 } %[[TMP_EXCEPTION_INFO]], i32 %[[TMP_EH_TYPE_ID]], 1
+// OGCG:   resume { ptr, i32 } %[[EXCEPTION_INFO]]
+
+void call_function_inside_try_catch_with_exception_type_and_catch_all() {
+  try {
+    division();
+  } catch (int e) {
+  } catch (...) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() personality(@__gxx_personality_v0)
+// CIR: cir.scope {
+// CIR:   %[[EXCEPTION_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e"]
+// CIR:   cir.try {
+// CIR:     %[[CALL:.*]] = cir.call @_Z8divisionv()
+// CIR:     cir.yield
+// CIR:   } catch [type #cir.global_view<@_ZTIi> : !cir.ptr<!u8i>] (%[[EH_TOKEN:.*]]: !cir.eh_token{{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[EH_TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!s32i>)
+// CIR:     cir.cleanup.scope {
+// CIR:       %[[TMP:.*]] = cir.load {{.*}} %[[EXN_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR:       cir.store {{.*}} %[[TMP]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
+// CIR:       cir.yield
+// CIR:     } cleanup {{.*}} {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } catch all (%[[EH_TOKEN2:.*]]: !cir.eh_token{{.*}}) {
+// CIR:     %[[CATCH_TOKEN2:.*]], %{{.*}} = cir.begin_catch %[[EH_TOKEN2]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
+// CIR:     cir.cleanup.scope {
+// CIR:       cir.yield
+// CIR:     } cleanup {{.*}} {
+// CIR:       cir.end_catch %[[CATCH_TOKEN2]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           catch ptr @_ZTIi
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   br label %[[DISPATCH:.*]]
+// LLVM: [[DISPATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// LLVM:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[EH_SELECTOR_PHI1]], %[[EH_TYPE_ID]]
+// LLVM:   br i1 %[[TYPE_ID_EQ]], label %[[BEGIN_CATCH:.*]], label %[[CATCH_ALL:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   %[[LOAD:.*]] = load i32, ptr %[[TOKEN]], align 4
+// LLVM:   store i32 %[[LOAD]], ptr {{.*}}, align 4
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[CATCH_ALL]]:
+// LLVM:   %[[EXN_OBJ_PHI3:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI3:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[DISPATCH:.*]] ]
+// LLVM:   %[[TOKEN2:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI3]])
+// LLVM:   br label %[[CATCH_ALL_BODY:.*]]
+// LLVM: [[CATCH_ALL_BODY]]:
+// LLVM:   br label %[[END_CATCH2:.*]]
+// LLVM: [[END_CATCH2]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH2:.*]]
+// LLVM: [[END_DISPATCH2]]:
+// LLVM:   br label %[[END_TRY2:.*]]
+// LLVM: [[END_TRY2]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z64call_function_inside_try_catch_with_exception_type_and_catch_allv() {{.*}} personality ptr @__gxx_personality_v0
+// OGCG:   %[[EXCEPTION_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[EH_TYPE_ID_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[E_ADDR:.*]] = alloca i32, align 4
+// OGCG:   %[[CALL:.*]] = invoke noundef i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_NORMAL:.*]] unwind label %[[INVOKE_UNWIND:.*]]
+// OGCG: [[INVOKE_NORMAL]]:
+// OGCG:   br label %try.cont
+// OGCG: [[INVOKE_UNWIND]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr @_ZTIi
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   br label %[[CATCH_DISPATCH:.*]]
+// OGCG: [[CATCH_DISPATCH]]:
+// OGCG:   %[[TMP_EH_TYPE_ID:.*]] = load i32, ptr %[[EH_TYPE_ID_ADDR]], align 4
+// OGCG:   %[[EH_TYPE_ID:.*]] = call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi)
+// OGCG:   %[[TYPE_ID_EQ:.*]] = icmp eq i32 %[[TMP_EH_TYPE_ID]], %[[EH_TYPE_ID]]
+// OGCG:   br i1 %[[TYPE_ID_EQ]], label %[[CATCH_EXCEPTION:.*]], label %[[CATCH_ALL:.*]]
+// OGCG: [[CATCH_EXCEPTION]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   %[[TMP_BEGIN_CATCH:.*]] = load i32, ptr %[[BEGIN_CATCH]], align 4
+// OGCG:   store i32 %[[TMP_BEGIN_CATCH]], ptr %[[E_ADDR]], align 4
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[TRY_CONT]]:
+// OGCG:   ret void
+// OGCG: [[CATCH_ALL]]:
+// OGCG:   %[[TMP_EXCEPTION:.*]] = load ptr, ptr %[[EXCEPTION_ADDR]], align 8
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[TMP_EXCEPTION]])
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]
+
+struct S {
+  ~S();
+};
+
+void cleanup_inside_try_body() {
+  try {
+    S s;
+    division();
+  } catch (...) {
+  }
+}
+
+// CIR: cir.func {{.*}} @_Z23cleanup_inside_try_bodyv(){{.*}} personality(@__gxx_personality_v0) {
+// CIR: cir.scope {
+// CIR:   %[[S:.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"]
+// CIR:   cir.try {
+// CIR:     cir.cleanup.scope {
+// CIR:       cir.call @_Z8divisionv()
+// CIR:       cir.yield
+// CIR:     } cleanup  all {
+// CIR:       cir.call @_ZN1SD1Ev(%[[S]])
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   } catch all (%[[TOKEN:.*]]: !cir.eh_token {{.*}}) {
+// CIR:     %[[CATCH_TOKEN:.*]], %[[EXN_PTR:.*]] = cir.begin_catch %[[TOKEN]] : !cir.eh_token -> (!cir.catch_token, !cir.ptr<!void>)
+// CIR:     cir.cleanup.scope {
+// CIR:       cir.yield
+// CIR:     } cleanup  all {
+// CIR:       cir.end_catch %[[CATCH_TOKEN]] : !cir.catch_token
+// CIR:       cir.yield
+// CIR:     }
+// CIR:     cir.yield
+// CIR:   }
+// CIR: }
+
+// LLVM: define {{.*}} void @_Z23cleanup_inside_try_bodyv() {{.*}} personality ptr @__gxx_personality_v0
+// LLVM:   br label %[[TRY_SCOPE:.*]]
+// LLVM: [[TRY_SCOPE]]:
+// LLVM:   br label %[[TRY_BEGIN:.*]]
+// LLVM: [[TRY_BEGIN]]:
+// LLVM:   br label %[[CLEANUP_SCOPE:.*]]
+// LLVM: [[CLEANUP_SCOPE]]:
+// LLVM:   %[[CALL:.*]] = invoke i32 @_Z8divisionv()
+// LLVM:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// LLVM: [[INVOKE_CONT]]:
+// LLVM:   br label %[[CLEANUP:.*]]
+// LLVM: [[CLEANUP]]:
+// LLVM:   call void @_ZN1SD1Ev(ptr {{.*}})
+// LLVM:   br label %[[END_CLEANUP:.*]]
+// LLVM: [[END_CLEANUP]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[LANDING_PAD]]:
+// LLVM:   %[[LP:.*]] = landingpad { ptr, i32 }
+// LLVM:           cleanup
+// LLVM:   %[[EXN_OBJ:.*]] = extractvalue { ptr, i32 } %[[LP]], 0
+// LLVM:   %[[EH_SELECTOR_VAL:.*]] = extractvalue { ptr, i32 } %[[LP]], 1
+// LLVM:   br label %[[CLEANUP_LANDING:.*]]
+// LLVM: [[CLEANUP_LANDING]]:
+// LLVM:   %[[EXN_OBJ_PHI:.*]] = phi ptr [ %[[EXN_OBJ:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI:.*]] = phi i32 [ %[[EH_SELECTOR_VAL:.*]], %[[LANDING_PAD:.*]] ]
+// LLVM:   call void @_ZN1SD1Ev(ptr {{.*}})
+// LLVM:   br label %[[CATCH:.*]]
+// LLVM: [[CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI1:.*]] = phi ptr [ %[[EXN_OBJ_PHI:.*]], %[[CLEANUP_LANDING:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI1:.*]] = phi i32 [ %[[EH_SELECTOR_PHI:.*]], %[[CLEANUP_LANDING:.*]] ]
+// LLVM:   br label %[[BEGIN_CATCH:.*]]
+// LLVM: [[BEGIN_CATCH]]:
+// LLVM:   %[[EXN_OBJ_PHI2:.*]] = phi ptr [ %[[EXN_OBJ_PHI1:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[EH_SELECTOR_PHI2:.*]] = phi i32 [ %[[EH_SELECTOR_PHI1:.*]], %[[CATCH:.*]] ]
+// LLVM:   %[[TOKEN:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXN_OBJ_PHI2]])
+// LLVM:   br label %[[CATCH_BODY:.*]]
+// LLVM: [[CATCH_BODY]]:
+// LLVM:   br label %[[END_CATCH:.*]]
+// LLVM: [[END_CATCH]]:
+// LLVM:   call void @__cxa_end_catch()
+// LLVM:   br label %[[END_DISPATCH:.*]]
+// LLVM: [[END_DISPATCH]]:
+// LLVM:   br label %[[END_TRY:.*]]
+// LLVM: [[END_TRY]]:
+// LLVM:   br label %[[TRY_CONT:.*]]
+// LLVM: [[TRY_CONT]]:
+// LLVM:   br label %[[DONE:.*]]
+// LLVM: [[DONE]]:
+// LLVM:   ret void
+
+// OGCG: define {{.*}} void @_Z23cleanup_inside_try_bodyv() {{.*}} personality ptr @__gxx_personality_v0 {
+// OGCG:   %[[S:.*]] = alloca %struct.S
+// OGCG:   %[[EXN_SLOT:.*]] = alloca ptr
+// OGCG:   %[[EHSELECTOR_SLOT:.*]] = alloca i32
+// OGCG:   %[[CALL:.*]] = invoke {{.*}} i32 @_Z8divisionv()
+// OGCG:           to label %[[INVOKE_CONT:.*]] unwind label %[[LANDING_PAD:.*]]
+// OGCG: [[INVOKE_CONT]]:
+// OGCG:   call void @_ZN1SD1Ev(ptr noundef nonnull align 1 dereferenceable(1) %[[S]])
+// OGCG:   br label %[[TRY_CONT:.*]]
+// OGCG: [[LANDING_PAD]]:
+// OGCG:   %[[LANDING_PAD:.*]] = landingpad { ptr, i32 }
+// OGCG:           catch ptr null
+// OGCG:   %[[EXCEPTION:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 0
+// OGCG:   store ptr %[[EXCEPTION]], ptr %[[EXN_SLOT]]
+// OGCG:   %[[EH_TYPE_ID:.*]] = extractvalue { ptr, i32 } %[[LANDING_PAD]], 1
+// OGCG:   store i32 %[[EH_TYPE_ID]], ptr %[[EHSELECTOR_SLOT]]
+// OGCG:   call void @_ZN1SD1Ev(ptr noundef nonnull align 1 dereferenceable(1) %[[S]])
+// OGCG:   br label %[[CATCH:.*]]
+// OGCG: [[CATCH]]:
+// OGCG:   %[[EXCEPTION:.*]] = load ptr, ptr %[[EXN_SLOT]]
+// OGCG:   %[[BEGIN_CATCH:.*]] = call ptr @__cxa_begin_catch(ptr %[[EXCEPTION]])
+// OGCG:   call void @__cxa_end_catch()
+// OGCG:   br label %[[TRY_CONT]]

>From c56aad6b4f52c9088be5847aa7da33d27df0b338 Mon Sep 17 00:00:00 2001
From: Ayush Kumar Gaur <132849148+Ayush3941 at users.noreply.github.com>
Date: Sat, 7 Mar 2026 07:21:30 -0500
Subject: [PATCH 04/33] [DAG] isKnownNeverZero: Add DemandedElts handling for
 ROTL/ROTR/BITREVERSE/BSWAP/CTPOP/ABS (#184033)

Add DemandedElts handling for ROTL/ROTR/BITREVERSE/BSWAP/CTPOP/ABS in SelectionDAG::isKnownNeverZero

Fixes #183040
---
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  2 +-
 llvm/test/CodeGen/X86/known-never-zero.ll     | 54 +++++++------------
 2 files changed, 19 insertions(+), 37 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index c4cd1b88f7802..a2318fc034fa2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6319,7 +6319,7 @@ bool SelectionDAG::isKnownNeverZero(SDValue Op, const APInt &DemandedElts,
   case ISD::BSWAP:
   case ISD::CTPOP:
   case ISD::ABS:
-    return isKnownNeverZero(Op.getOperand(0), Depth + 1);
+    return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
 
   case ISD::SRA:
   case ISD::SRL: {
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index 46ccb5c825905..8327a90bdeda5 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1075,9 +1075,7 @@ define i32 @rotr_with_fshr_known_nonzero_vec(<4 x i32> %xx, <4 x i32> %y, ptr %p
 ; X86-NEXT:    por %xmm3, %xmm0
 ; X86-NEXT:    movdqa %xmm0, (%eax)
 ; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: rotr_with_fshr_known_nonzero_vec:
@@ -1099,9 +1097,8 @@ define i32 @rotr_with_fshr_known_nonzero_vec(<4 x i32> %xx, <4 x i32> %y, ptr %p
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; X64-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 256, i32 0, i32 0, i32 0>
   %z = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %y)
@@ -1235,9 +1232,7 @@ define i32 @rotl_with_fshl_known_nonzero_vec(<4 x i32> %xx, <4 x i32> %y, ptr %p
 ; X86-NEXT:    por %xmm3, %xmm0
 ; X86-NEXT:    movdqa %xmm0, (%eax)
 ; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: rotl_with_fshl_known_nonzero_vec:
@@ -1257,9 +1252,8 @@ define i32 @rotl_with_fshl_known_nonzero_vec(<4 x i32> %xx, <4 x i32> %y, ptr %p
 ; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
 ; X64-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 256, i32 0, i32 0, i32 0>
   %z = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %y)
@@ -2192,9 +2186,7 @@ define i32 @abs_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X86-NEXT:    psubd %xmm1, %xmm0
 ; X86-NEXT:    movdqa %xmm0, (%eax)
 ; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abs_known_nonzero_vec:
@@ -2202,9 +2194,8 @@ define i32 @abs_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X64-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vpabsd %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 64, i32 -1, i32 0, i32 0>
   %z = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %x, i1 0)
@@ -2276,9 +2267,7 @@ define i32 @bswap_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X86-NEXT:    packuswb %xmm2, %xmm0
 ; X86-NEXT:    movdqa %xmm0, (%eax)
 ; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: bswap_known_nonzero_vec:
@@ -2286,9 +2275,8 @@ define i32 @bswap_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,u,u,u,u,11,10,9,8,15,14,13,12]
 ; X64-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 64, i32 -1, i32 0, i32 0>
   %z = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %x)
@@ -2410,9 +2398,7 @@ define i32 @bitreverse_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X86-NEXT:    por %xmm1, %xmm0
 ; X86-NEXT:    movdqa %xmm0, (%eax)
 ; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: bitreverse_known_nonzero_vec:
@@ -2429,9 +2415,8 @@ define i32 @bitreverse_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X64-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
 ; X64-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 64, i32 -1, i32 0, i32 0>
   %z = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %x)
@@ -2572,9 +2557,7 @@ define i32 @ctpop_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X86-NEXT:    packuswb %xmm2, %xmm1
 ; X86-NEXT:    movdqa %xmm1, (%eax)
 ; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    bsfl %eax, %ecx
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    rep bsfl %eax, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: ctpop_known_nonzero_vec:
@@ -2595,9 +2578,8 @@ define i32 @ctpop_known_nonzero_vec(<4 x i32> %xx, ptr %p) nounwind {
 ; X64-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
 ; X64-NEXT:    vmovdqa %xmm0, (%rdi)
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    rep bsfl %ecx, %eax
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    rep bsfl %eax, %eax
 ; X64-NEXT:    retq
   %x = or <4 x i32> %xx, <i32 64, i32 -1, i32 0, i32 0>
   %z = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %x)

>From fe7d245c187b3c53de72662f177574f7e5c2167a Mon Sep 17 00:00:00 2001
From: Amr Hesham <amr96 at programmer.net>
Date: Sat, 7 Mar 2026 14:27:35 +0100
Subject: [PATCH 05/33] [CIR][NFC] ComplexType visitStmt replace NYI with
 unreachable (#185178)

Replace errorNYI with llvm_unreachable in ComplexType visitStmt
---
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index c57f9a3b2ce9d..ff5a58a8eb122 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -61,8 +61,8 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   }
 
   mlir::Value VisitStmt(Stmt *s) {
-    cgf.cgm.errorNYI(s->getBeginLoc(), "ComplexExprEmitter VisitStmt");
-    return {};
+    s->dump(llvm::errs(), cgf.getContext());
+    llvm_unreachable("Stmt can't have complex result type!");
   }
 
   mlir::Value VisitExpr(Expr *e);

>From 898dc857633bebda504a20390220c87761050f08 Mon Sep 17 00:00:00 2001
From: Alexis Engelke <engelke at in.tum.de>
Date: Sat, 7 Mar 2026 14:35:51 +0100
Subject: [PATCH 06/33] [clangd] Disable PCH for libs that use gRPC (#185185)

gRPC adds -pthread, which is incompatible with PCH without it. At some
point, we probably should generally switch from -lpthread to -pthread,
but until then, disable PCH for affected targets.

Fixes https://github.com/llvm/llvm-project/issues/184759.
---
 clang-tools-extra/clangd/index/dex/dexp/CMakeLists.txt       | 3 +++
 clang-tools-extra/clangd/index/remote/CMakeLists.txt         | 3 +++
 .../clangd/index/remote/marshalling/CMakeLists.txt           | 3 +++
 clang-tools-extra/clangd/index/remote/monitor/CMakeLists.txt | 3 +++
 clang-tools-extra/clangd/index/remote/server/CMakeLists.txt  | 3 +++
 clang-tools-extra/clangd/tool/CMakeLists.txt                 | 3 +++
 clang/cmake/modules/AddGRPC.cmake                            | 5 ++++-
 7 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/clang-tools-extra/clangd/index/dex/dexp/CMakeLists.txt b/clang-tools-extra/clangd/index/dex/dexp/CMakeLists.txt
index 4fe42cb8786f1..ae11c43fda86a 100644
--- a/clang-tools-extra/clangd/index/dex/dexp/CMakeLists.txt
+++ b/clang-tools-extra/clangd/index/dex/dexp/CMakeLists.txt
@@ -5,6 +5,9 @@ set(LLVM_LINK_COMPONENTS
 
 add_clang_executable(dexp
   Dexp.cpp
+
+  # Disable PCH, gRPC adds -pthread.
+  DISABLE_PCH_REUSE
   )
 
 clang_target_link_libraries(dexp
diff --git a/clang-tools-extra/clangd/index/remote/CMakeLists.txt b/clang-tools-extra/clangd/index/remote/CMakeLists.txt
index 28df71855a142..ef4d04b47dae2 100644
--- a/clang-tools-extra/clangd/index/remote/CMakeLists.txt
+++ b/clang-tools-extra/clangd/index/remote/CMakeLists.txt
@@ -32,6 +32,9 @@ if (CLANGD_ENABLE_REMOTE)
     DEPENDS
     clangdRemoteIndexProto
     clangdRemoteIndexServiceProto
+
+    # Disable PCH, gRPC adds -pthread.
+    DISABLE_PCH_REUSE
     )
 
   clang_target_link_libraries(clangdRemoteIndex
diff --git a/clang-tools-extra/clangd/index/remote/marshalling/CMakeLists.txt b/clang-tools-extra/clangd/index/remote/marshalling/CMakeLists.txt
index d7e37003e5f98..74963e838677c 100644
--- a/clang-tools-extra/clangd/index/remote/marshalling/CMakeLists.txt
+++ b/clang-tools-extra/clangd/index/remote/marshalling/CMakeLists.txt
@@ -8,4 +8,7 @@ add_clang_library(clangdRemoteMarshalling STATIC
 
   DEPENDS
   clangdRemoteIndexProto
+
+  # Disable PCH, gRPC adds -pthread.
+  DISABLE_PCH_REUSE
   )
diff --git a/clang-tools-extra/clangd/index/remote/monitor/CMakeLists.txt b/clang-tools-extra/clangd/index/remote/monitor/CMakeLists.txt
index 194c7d5128a98..53351ae901b3d 100644
--- a/clang-tools-extra/clangd/index/remote/monitor/CMakeLists.txt
+++ b/clang-tools-extra/clangd/index/remote/monitor/CMakeLists.txt
@@ -6,6 +6,9 @@ add_clang_executable(clangd-index-server-monitor
 
   DEPENDS
   clangdRemoteIndexServiceProto
+
+  # Disable PCH, gRPC adds -pthread.
+  DISABLE_PCH_REUSE
   )
 
 target_link_libraries(clangd-index-server-monitor
diff --git a/clang-tools-extra/clangd/index/remote/server/CMakeLists.txt b/clang-tools-extra/clangd/index/remote/server/CMakeLists.txt
index 3bea34826f8a1..12090d83928fd 100644
--- a/clang-tools-extra/clangd/index/remote/server/CMakeLists.txt
+++ b/clang-tools-extra/clangd/index/remote/server/CMakeLists.txt
@@ -7,6 +7,9 @@ add_clang_executable(clangd-index-server
   DEPENDS
   clangdRemoteIndexProto
   clangdRemoteIndexServiceProto
+
+  # Disable PCH, gRPC adds -pthread.
+  DISABLE_PCH_REUSE
   )
 
 target_link_libraries(clangd-index-server
diff --git a/clang-tools-extra/clangd/tool/CMakeLists.txt b/clang-tools-extra/clangd/tool/CMakeLists.txt
index 850a82833e444..1bd6a1f864dd3 100644
--- a/clang-tools-extra/clangd/tool/CMakeLists.txt
+++ b/clang-tools-extra/clangd/tool/CMakeLists.txt
@@ -4,6 +4,9 @@ set(LLVM_OPTIONAL_SOURCES ClangdToolMain.cpp)
 add_clang_library(clangdMain STATIC
   ClangdMain.cpp
   Check.cpp
+
+  # Disable PCH, gRPC adds -pthread.
+  DISABLE_PCH_REUSE
   )
 
 add_clang_tool(clangd
diff --git a/clang/cmake/modules/AddGRPC.cmake b/clang/cmake/modules/AddGRPC.cmake
index 8989bd757d247..5d752bf86459d 100644
--- a/clang/cmake/modules/AddGRPC.cmake
+++ b/clang/cmake/modules/AddGRPC.cmake
@@ -7,5 +7,8 @@ function(generate_clang_protos_library LibraryName ProtoFile)
 
   add_clang_library(${LibraryName} ${ProtoSource}
     PARTIAL_SOURCES_INTENDED
-    LINK_LIBS PUBLIC grpc++ protobuf)
+    LINK_LIBS PUBLIC grpc++ protobuf
+    # Disable PCH, gRPC adds -pthread.
+    DISABLE_PCH_REUSE
+    )
 endfunction()

>From 818168ebca24640f8cfc495abc79f658e43c7e2c Mon Sep 17 00:00:00 2001
From: Aditya Singh <aditya26189 at kgpian.iitkgp.ac.in>
Date: Sat, 7 Mar 2026 19:34:58 +0530
Subject: [PATCH 07/33] [clang-tidy] Fix false positive for constrained
 template parameters in `cppcoreguidelines-missing-std-forward` (#182038)

Explicit object parameters with a type constraint are skipped to avoid
false positives. They are technically still forwarding references per
[temp.deduct.call], but rarely intended to be perfectly forwarded.

Fixes #180362
---
 .../MissingStdForwardCheck.cpp                | 10 ++++
 clang-tools-extra/docs/ReleaseNotes.rst       |  4 ++
 .../cppcoreguidelines/missing-std-forward.rst |  7 +++
 .../missing-std-forward-cxx23.cpp             | 58 +++++++++++++++++++
 4 files changed, 79 insertions(+)
 create mode 100644 clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward-cxx23.cpp

diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
index d1d81d510c8fb..3fda2a671eaf4 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
@@ -46,6 +46,16 @@ AST_MATCHER(ParmVarDecl, isTemplateTypeParameter) {
 
   const QualType ParamType =
       Node.getType().getNonPackExpansionType()->getPointeeType();
+
+  // Explicit object parameters with a type constraint are still forwarding
+  // references per [temp.deduct.call]. We conservatively suppress warnings
+  // here to avoid false positives when constraints restrict the deduced type,
+  // accepting false negatives as a trade-off.
+  if (Node.isExplicitObjectParameter())
+    if (const auto *TTPT = ParamType->getAs<TemplateTypeParmType>())
+      if (const auto *Decl = TTPT->getDecl(); Decl && Decl->hasTypeConstraint())
+        return false;
+
   const auto *TemplateType = ParamType->getAsCanonical<TemplateTypeParmType>();
   if (!TemplateType)
     return false;
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index b0b4cd646c3bd..9f4d7e6923fa0 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -216,6 +216,10 @@ Changes in existing checks
   <clang-tidy/checks/cppcoreguidelines/init-variables>` check by ensuring that
   member pointers are correctly flagged as uninitialized.
 
+- Improved :doc:`cppcoreguidelines-missing-std-forward
+  <clang-tidy/checks/cppcoreguidelines/missing-std-forward>` check by fixing
+  a false positive for constrained template parameters.
+
 - Improved :doc:`cppcoreguidelines-pro-type-vararg
   <clang-tidy/checks/cppcoreguidelines/pro-type-vararg>` check by no longer
   warning on builtins with custom type checking (e.g., type-generic builtins
diff --git a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst
index c2af1239a7d4c..c8c0206a4d67c 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.rst
@@ -45,3 +45,10 @@ Options
 This check implements `F.19
 <http://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#rf-forward>`_
 from the C++ Core Guidelines.
+
+Limitations
+-----------
+
+Explicit object parameters (``this Self&&``) with a type constraint are not
+checked to avoid false positives. Such parameters are rarely intended to be
+perfectly forwarded.
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward-cxx23.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward-cxx23.cpp
new file mode 100644
index 0000000000000..723b7893673a1
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward-cxx23.cpp
@@ -0,0 +1,58 @@
+// RUN: %check_clang_tidy -std=c++23-or-later %s cppcoreguidelines-missing-std-forward %t -- -- -fno-delayed-template-parsing
+
+// NOLINTBEGIN
+namespace std {
+
+template <typename T> struct remove_reference      { using type = T; };
+template <typename T> struct remove_reference<T&>  { using type = T; };
+template <typename T> struct remove_reference<T&&> { using type = T; };
+
+template <typename T> using remove_reference_t = typename remove_reference<T>::type;
+
+template <typename T> constexpr T &&forward(remove_reference_t<T> &t) noexcept;
+template <typename T> constexpr T &&forward(remove_reference_t<T> &&t) noexcept;
+template <typename T> constexpr remove_reference_t<T> &&move(T &&x);
+
+template <class T, class U>
+concept derived_from = true;
+
+template <class T>
+concept integral = true;
+
+} // namespace std
+// NOLINTEND
+
+// Tests for constrained explicit object parameters (GH#180362).
+
+// Constrained explicit object parameter is not a forwarding reference.
+class GH180362_A {
+public:
+  template <std::derived_from<GH180362_A> Self>
+  auto operator|(this Self &&self, int) -> void {}
+};
+
+// Another constrained explicit object parameter — no warning.
+struct GH180362_C {
+  template <std::integral Self>
+  void bar(this Self &&self) {}
+};
+
+// Unconstrained explicit object parameter IS a forwarding reference.
+struct GH180362_B {
+  template <typename Self>
+  void foo(this Self &&self) {}
+  // CHECK-MESSAGES: :[[@LINE-1]]:24: warning: forwarding reference parameter 'self' is never forwarded inside the function body [cppcoreguidelines-missing-std-forward]
+};
+
+// Requires-clause on explicit object parameter is NOT a type constraint.
+struct GH180362_D {
+  template <typename Self>
+    requires std::derived_from<Self, GH180362_D>
+  void baz(this Self &&self) {}
+  // CHECK-MESSAGES: :[[@LINE-1]]:24: warning: forwarding reference parameter 'self' is never forwarded inside the function body [cppcoreguidelines-missing-std-forward]
+};
+
+// Constrained non-explicit-object parameter IS still a forwarding reference.
+template <std::integral T>
+void gh180362_takes_integral(T &&t) {}
+// CHECK-MESSAGES: :[[@LINE-1]]:34: warning: forwarding reference parameter 't' is never forwarded inside the function body [cppcoreguidelines-missing-std-forward]

>From f441746a7b3986d2fcb1f973745b53e33e6c68e4 Mon Sep 17 00:00:00 2001
From: Jakub Kuderski <jakub at nod-labs.com>
Date: Sat, 7 Mar 2026 10:36:22 -0500
Subject: [PATCH 08/33] [ADT] Remove deprecated variadic `StringSwitch::Cases`
 and `CasesLower` (#185191)

These overloads have been deprecated since October 2025.
Use the `std::initializer_list` overload instead: `.Cases({"a", "b"},
Value)`.

For more context, see https://github.com/llvm/llvm-project/pull/163117.

Assisted-by: claude

Co-authored-by: Claude Opus 4.6 <noreply at anthropic.com>
---
 llvm/include/llvm/ADT/StringSwitch.h | 113 +++------------------------
 1 file changed, 10 insertions(+), 103 deletions(-)

diff --git a/llvm/include/llvm/ADT/StringSwitch.h b/llvm/include/llvm/ADT/StringSwitch.h
index 5da6076c27390..b144d51364855 100644
--- a/llvm/include/llvm/ADT/StringSwitch.h
+++ b/llvm/include/llvm/ADT/StringSwitch.h
@@ -87,67 +87,11 @@ class StringSwitch {
 
   StringSwitch &Cases(std::initializer_list<StringLiteral> CaseStrings,
                       T Value) {
-    return CasesImpl(CaseStrings, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
-    return CasesImpl({S0, S1}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      T Value) {
-    return CasesImpl({S0, S1, S2}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, T Value) {
-    return CasesImpl({S0, S1, S2, S3}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
-                      T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4, S5}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
-                      StringLiteral S6, T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4, S5, S6}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
-                      StringLiteral S6, StringLiteral S7, T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4, S5, S6, S7}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
-                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
-                      T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4, S5, S6, S7, S8}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
-                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
-                      StringLiteral S9, T Value) {
-    return CasesImpl({S0, S1, S2, S3, S4, S5, S6, S7, S8, S9}, Value);
+    // Stop matching after the string is found.
+    for (StringLiteral S : CaseStrings)
+      if (CaseImpl(S, Value))
+        break;
+    return *this;
   }
 
   // Case-insensitive case matchers.
@@ -172,30 +116,11 @@ class StringSwitch {
 
   StringSwitch &CasesLower(std::initializer_list<StringLiteral> CaseStrings,
                            T Value) {
-    return CasesLowerImpl(CaseStrings, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
-    return CasesLowerImpl({S0, S1}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                           T Value) {
-    return CasesLowerImpl({S0, S1, S2}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                           StringLiteral S3, T Value) {
-    return CasesLowerImpl({S0, S1, S2, S3}, Value);
-  }
-
-  [[deprecated("Pass cases in std::initializer_list instead")]]
-  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
-                           StringLiteral S3, StringLiteral S4, T Value) {
-    return CasesLowerImpl({S0, S1, S2, S3, S4}, Value);
+    // Stop matching after the string is found.
+    for (StringLiteral S : CaseStrings)
+      if (CaseLowerImpl(S, Value))
+        break;
+    return *this;
   }
 
   [[nodiscard]] R Default(T Value) {
@@ -240,24 +165,6 @@ class StringSwitch {
     Result = std::move(Value);
     return true;
   }
-
-  StringSwitch &CasesImpl(std::initializer_list<StringLiteral> Cases,
-                          T &Value) {
-    // Stop matching after the string is found.
-    for (StringLiteral S : Cases)
-      if (CaseImpl(S, Value))
-        break;
-    return *this;
-  }
-
-  StringSwitch &CasesLowerImpl(std::initializer_list<StringLiteral> Cases,
-                               T &Value) {
-    // Stop matching after the string is found.
-    for (StringLiteral S : Cases)
-      if (CaseLowerImpl(S, Value))
-        break;
-    return *this;
-  }
 };
 
 } // end namespace llvm

>From 1af9f7c0e6cfbdba380ad5c261c6244e7c8edd7a Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Sat, 7 Mar 2026 15:37:13 +0000
Subject: [PATCH 09/33] [LV] Add test for IG narrowing and epilogue with
 ordered reductions.

Add missing test coverage for narrowing interleave groups and countable
early exit and epilogue vectorization with ordered reductions.
---
 ...sform-narrow-interleave-to-widen-memory.ll | 154 ++++-
 .../epilog-vectorization-ordered-reduction.ll | 564 ++++++++++++++++++
 2 files changed, 715 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-ordered-reduction.ll

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
index ea49a27b363db..5c82ecf0cc834 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/transform-narrow-interleave-to-widen-memory.ll
@@ -98,6 +98,154 @@ exit:
   ret void
 }
 
+define void @interleave_group_with_countable_early_exit(i64 %n, ptr %dst) {
+; CHECK-LABEL: define void @interleave_group_with_countable_early_exit(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 1
+; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP2]], i64 38)
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP0]], [[UMAX]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK:       [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT:    [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[N]])
+; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 [[MUL_RESULT]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[DST]]
+; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 8
+; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[N]])
+; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
+; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT3]]
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult ptr [[TMP6]], [[SCEVGEP]]
+; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW4]]
+; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[TMP8]]
+; CHECK-NEXT:    br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 1
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP11]], i64 [[N_MOD_VF]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP13]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr { i64, i64 }, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[SCALAR_PH]]
+; CHECK:       [[SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP_HEADER:.*]]
+; CHECK:       [[LOOP_HEADER]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT:    [[COND:%.*]] = icmp ugt i64 [[IV]], [[N]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT1:.*]], label %[[LOOP_LATCH]]
+; CHECK:       [[EXIT1]]:
+; CHECK-NEXT:    ret void
+; CHECK:       [[LOOP_LATCH]]:
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr { i64, i64 }, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT:    store i64 0, ptr [[GEP1]], align 8
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr [[GEP1]], i64 8
+; CHECK-NEXT:    store i64 0, ptr [[GEP2]], align 8
+; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[N]], [[IV]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[EXIT2:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[EXIT2]]:
+; CHECK-NEXT:    ret void
+;
+; EPILOGUE-LABEL: define void @interleave_group_with_countable_early_exit(
+; EPILOGUE-SAME: i64 [[N:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
+; EPILOGUE-NEXT:  [[ENTRY:.*]]:
+; EPILOGUE-NEXT:    [[TMP0:%.*]] = add i64 [[N]], 1
+; EPILOGUE-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; EPILOGUE-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 1
+; EPILOGUE-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP2]], i64 40)
+; EPILOGUE-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP0]], [[UMAX]]
+; EPILOGUE-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; EPILOGUE:       [[VECTOR_SCEVCHECK]]:
+; EPILOGUE-NEXT:    [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[N]])
+; EPILOGUE-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
+; EPILOGUE-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
+; EPILOGUE-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 [[MUL_RESULT]]
+; EPILOGUE-NEXT:    [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[DST]]
+; EPILOGUE-NEXT:    [[TMP5:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
+; EPILOGUE-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 8
+; EPILOGUE-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[N]])
+; EPILOGUE-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
+; EPILOGUE-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
+; EPILOGUE-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT3]]
+; EPILOGUE-NEXT:    [[TMP7:%.*]] = icmp ult ptr [[TMP6]], [[SCEVGEP]]
+; EPILOGUE-NEXT:    [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW4]]
+; EPILOGUE-NEXT:    [[TMP9:%.*]] = or i1 [[TMP5]], [[TMP8]]
+; EPILOGUE-NEXT:    br i1 [[TMP9]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; EPILOGUE:       [[VECTOR_PH]]:
+; EPILOGUE-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; EPILOGUE-NEXT:    [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 1
+; EPILOGUE-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP11]]
+; EPILOGUE-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; EPILOGUE-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i64 [[TMP11]], i64 [[N_MOD_VF]]
+; EPILOGUE-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP13]]
+; EPILOGUE-NEXT:    br label %[[VECTOR_BODY:.*]]
+; EPILOGUE:       [[VECTOR_BODY]]:
+; EPILOGUE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; EPILOGUE-NEXT:    [[TMP14:%.*]] = getelementptr { i64, i64 }, ptr [[DST]], i64 [[INDEX]]
+; EPILOGUE-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[TMP14]], align 8
+; EPILOGUE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; EPILOGUE-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; EPILOGUE-NEXT:    br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; EPILOGUE:       [[MIDDLE_BLOCK]]:
+; EPILOGUE-NEXT:    br label %[[SCALAR_PH]]
+; EPILOGUE:       [[SCALAR_PH]]:
+; EPILOGUE-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; EPILOGUE-NEXT:    br label %[[LOOP_HEADER:.*]]
+; EPILOGUE:       [[LOOP_HEADER]]:
+; EPILOGUE-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; EPILOGUE-NEXT:    [[COND:%.*]] = icmp ugt i64 [[IV]], [[N]]
+; EPILOGUE-NEXT:    br i1 [[COND]], label %[[EXIT1:.*]], label %[[LOOP_LATCH]]
+; EPILOGUE:       [[EXIT1]]:
+; EPILOGUE-NEXT:    ret void
+; EPILOGUE:       [[LOOP_LATCH]]:
+; EPILOGUE-NEXT:    [[GEP1:%.*]] = getelementptr { i64, i64 }, ptr [[DST]], i64 [[IV]]
+; EPILOGUE-NEXT:    store i64 0, ptr [[GEP1]], align 8
+; EPILOGUE-NEXT:    [[GEP2:%.*]] = getelementptr i8, ptr [[GEP1]], i64 8
+; EPILOGUE-NEXT:    store i64 0, ptr [[GEP2]], align 8
+; EPILOGUE-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
+; EPILOGUE-NEXT:    [[CMP:%.*]] = icmp eq i64 [[N]], [[IV]]
+; EPILOGUE-NEXT:    br i1 [[CMP]], label %[[EXIT2:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]]
+; EPILOGUE:       [[EXIT2]]:
+; EPILOGUE-NEXT:    ret void
+;
+entry:
+  br label %loop.header
+
+loop.header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+  %cond = icmp ugt i64 %iv, %n
+  br i1 %cond, label %exit1, label %loop.latch
+
+exit1:
+  ret void
+
+loop.latch:
+  %gep1 = getelementptr { i64, i64 }, ptr %dst, i64 %iv
+  store i64 0, ptr %gep1, align 8
+  %gep2 = getelementptr i8, ptr %gep1, i64 8
+  store i64 0, ptr %gep2, align 8
+  %iv.next = add i64 %iv, 1
+  %cmp = icmp eq i64 %n, %iv
+  br i1 %cmp, label %exit2, label %loop.header
+
+exit2:
+  ret void
+}
 
 define void @load_store_interleave_group_i32(ptr noalias %data) {
 ; CHECK-LABEL: define void @load_store_interleave_group_i32(
@@ -126,7 +274,7 @@ define void @load_store_interleave_group_i32(ptr noalias %data) {
 ; CHECK-NEXT:    [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP5]], [[EVL_BASED_IV]]
 ; CHECK-NEXT:    [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
-; CHECK-NEXT:    br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    br label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
@@ -153,7 +301,7 @@ define void @load_store_interleave_group_i32(ptr noalias %data) {
 ; EPILOGUE-NEXT:    store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP5]], align 8
 ; EPILOGUE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
 ; EPILOGUE-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; EPILOGUE-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; EPILOGUE-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
 ; EPILOGUE:       [[MIDDLE_BLOCK]]:
 ; EPILOGUE-NEXT:    [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
 ; EPILOGUE-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -180,7 +328,7 @@ define void @load_store_interleave_group_i32(ptr noalias %data) {
 ; EPILOGUE-NEXT:    store i32 [[L_3]], ptr [[DATA_3]], align 8
 ; EPILOGUE-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; EPILOGUE-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
-; EPILOGUE-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; EPILOGUE-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
 ; EPILOGUE:       [[EXIT]]:
 ; EPILOGUE-NEXT:    ret void
 ;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-ordered-reduction.ll
new file mode 100644
index 0000000000000..2e946693af294
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-ordered-reduction.ll
@@ -0,0 +1,564 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -force-ordered-reductions -mcpu=tigerlake -S %s | FileCheck %s
+
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx15.0.0"
+
+define float @ordered_reduction_epilogue(ptr %p, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define float @ordered_reduction_epilogue(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 64
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], 64
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = phi float [ 0.000000e+00, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD4:%.*]] = load <16 x float>, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP3]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP4]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP5]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[TMP9]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD4]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8
+; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[N_MOD_VF5:%.*]] = urem i64 [[N]], 8
+; CHECK-NEXT:    [[N_VEC5:%.*]] = sub i64 [[N]], [[N_MOD_VF5]]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL1]], %[[VEC_EPILOG_PH]] ], [ [[BOUND:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI8:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP11:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr float, ptr [[P]], i64 [[VEC_EPILOG_RESUME_VAL]]
+; CHECK-NEXT:    [[WIDE_LOAD9:%.*]] = load <8 x float>, ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP11]] = call float @llvm.vector.reduce.fadd.v8f32(float [[VEC_PHI8]], <8 x float> [[WIDE_LOAD9]])
+; CHECK-NEXT:    [[BOUND]] = add nuw i64 [[VEC_EPILOG_RESUME_VAL]], 8
+; CHECK-NEXT:    [[CMP_N7:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC5]]
+; CHECK-NEXT:    br i1 [[CMP_N7]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC5]]
+; CHECK-NEXT:    br i1 [[CMP_N11]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX12:%.*]] = phi float [ [[TMP11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED:%.*]] = phi float [ [[BC_MERGE_RDX12]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-NEXT:    [[FADD]] = fadd float [[RED]], [[LOAD]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[FADD]], %[[LOOP]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ [[TMP11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red = phi float [ 0.0, %entry ], [ %fadd, %loop ]
+  %gep = getelementptr float, ptr %p, i64 %iv
+  %load = load float, ptr %gep, align 4
+  %fadd = fadd float %red, %load
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, %n
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %res = phi float [ %fadd, %loop ]
+  ret float %res
+}
+
+define float @ordered_reduction_epilogue_single_vector_iterations(ptr %p, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define float @ordered_reduction_epilogue_single_vector_iterations(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    [[BOUND:%.*]] = call i64 @llvm.umin.i64(i64 [[N]], i64 16)
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI]], <16 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP4]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP5]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP7]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br i1 false, label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 64, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI5:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP10:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX4]]
+; CHECK-NEXT:    [[WIDE_LOAD6:%.*]] = load <16 x float>, ptr [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI5]], <16 x float> [[WIDE_LOAD6]])
+; CHECK-NEXT:    [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 16
+; CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT7]], 80
+; CHECK-NEXT:    br i1 [[TMP11]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br i1 true, label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 80, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 64, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX8:%.*]] = phi float [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED:%.*]] = phi float [ [[BC_MERGE_RDX8]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-NEXT:    [[FADD]] = fadd float [[RED]], [[LOAD]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 80
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[FADD]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+entry:
+  %bound = call i64 @llvm.umin.i64(i64 %n, i64 16)
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red = phi float [ 0.0, %entry ], [ %fadd, %loop ]
+  %gep = getelementptr float, ptr %p, i64 %iv
+  %load = load float, ptr %gep, align 4
+  %fadd = fadd float %red, %load
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, 80
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %res = phi float [ %fadd, %loop ]
+  ret float %res
+}
+
+define float @ordered_reduction_nonzero_start_single_iteration_vector_loops(ptr %p, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define float @ordered_reduction_nonzero_start_single_iteration_vector_loops(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 4.200000e+01, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI]], <16 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP4]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP5]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP7]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
+; CHECK-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br i1 false, label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 64, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 4.200000e+01, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI5:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP10:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX4]]
+; CHECK-NEXT:    [[WIDE_LOAD6:%.*]] = load <16 x float>, ptr [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI5]], <16 x float> [[WIDE_LOAD6]])
+; CHECK-NEXT:    [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 16
+; CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT7]], 80
+; CHECK-NEXT:    br i1 [[TMP11]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br i1 true, label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 80, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 64, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX8:%.*]] = phi float [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 4.200000e+01, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED:%.*]] = phi float [ [[BC_MERGE_RDX8]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-NEXT:    [[FADD]] = fadd float [[RED]], [[LOAD]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 80
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[FADD]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red = phi float [ 42.0, %entry ], [ %fadd, %loop ]
+  %gep = getelementptr float, ptr %p, i64 %iv
+  %load = load float, ptr %gep, align 4
+  %fadd = fadd float %red, %load
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, 80
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %res = phi float [ %fadd, %loop ]
+  ret float %res
+}
+
+; FIXME: Should not generate dead main vector loop.
+define float @ordered_reduction_epilogue_dead_main_loop(ptr %p, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define float @ordered_reduction_epilogue_dead_main_loop(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    [[BOUND:%.*]] = call i64 @llvm.umin.i64(i64 [[N]], i64 16)
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[BOUND]], 16
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[BOUND]], 64
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI]], <16 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP4]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP5]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP7]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 16
+; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[N_MOD_VF4:%.*]] = urem i64 [[BOUND]], 16
+; CHECK-NEXT:    [[N_VEC5:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF4]]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI7:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX6]]
+; CHECK-NEXT:    [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP9]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI7]], <16 x float> [[WIDE_LOAD8]])
+; CHECK-NEXT:    [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 16
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N10:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC5]]
+; CHECK-NEXT:    br i1 [[CMP_N10]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX11:%.*]] = phi float [ [[TMP9]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED:%.*]] = phi float [ [[BC_MERGE_RDX11]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-NEXT:    [[FADD]] = fadd float [[RED]], [[LOAD]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[BOUND]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[FADD]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+entry:
+  %bound = call i64 @llvm.umin.i64(i64 %n, i64 16)
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red = phi float [ 0.0, %entry ], [ %fadd, %loop ]
+  %gep = getelementptr float, ptr %p, i64 %iv
+  %load = load float, ptr %gep, align 4
+  %fadd = fadd float %red, %load
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, %bound
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %res = phi float [ %fadd, %loop ]
+  ret float %res
+}
+
+; FIXME: Should not generate dead main vector loop.
+; Same as above but with a non-zero start value for the reduction.
+define float @ordered_reduction_nonzero_start_dead_main_vector_loop(ptr %p, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define float @ordered_reduction_nonzero_start_dead_main_vector_loop(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    [[BOUND:%.*]] = call i64 @llvm.umin.i64(i64 [[N]], i64 16)
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[BOUND]], 16
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[BOUND]], 64
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi float [ 4.200000e+01, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI]], <16 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP4]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP5]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP7]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 16
+; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 4.200000e+01, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[N_MOD_VF4:%.*]] = urem i64 [[BOUND]], 16
+; CHECK-NEXT:    [[N_VEC5:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF4]]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX6:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI7:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[VEC_EPILOG_PH]] ], [ [[TMP9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX6]]
+; CHECK-NEXT:    [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP9]] = call float @llvm.vector.reduce.fadd.v16f32(float [[VEC_PHI7]], <16 x float> [[WIDE_LOAD8]])
+; CHECK-NEXT:    [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 16
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N10:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC5]]
+; CHECK-NEXT:    br i1 [[CMP_N10]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX11:%.*]] = phi float [ [[TMP9]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 4.200000e+01, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED:%.*]] = phi float [ [[BC_MERGE_RDX11]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD:%.*]] = load float, ptr [[GEP]], align 4
+; CHECK-NEXT:    [[FADD]] = fadd float [[RED]], [[LOAD]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[BOUND]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RES:%.*]] = phi float [ [[FADD]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    ret float [[RES]]
+;
+entry:
+  %bound = call i64 @llvm.umin.i64(i64 %n, i64 16)
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red = phi float [ 42.0, %entry ], [ %fadd, %loop ]
+  %gep = getelementptr float, ptr %p, i64 %iv
+  %load = load float, ptr %gep, align 4
+  %fadd = fadd float %red, %load
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, %bound
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %res = phi float [ %fadd, %loop ]
+  ret float %res
+}
+
+; Two ordered reductions with different start values in the same loop.
+define { float, float } @two_ordered_reductions(ptr %p, ptr %q, i64 %n) "prefer-vector-width"="512" {
+; CHECK-LABEL: define { float, float } @two_ordered_reductions(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ITER_CHECK:.*]]:
+; CHECK-NEXT:    [[BOUND:%.*]] = call i64 @llvm.umin.i64(i64 [[N]], i64 16)
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[BOUND]], 16
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[BOUND]], 64
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = phi float [ 0.000000e+00, %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP11:%.*]] = phi float [ 1.000000e+00, %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr float, ptr [[TMP0]], i64 16
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[TMP0]], i64 32
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr float, ptr [[TMP0]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD5:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr float, ptr [[Q]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr float, ptr [[TMP4]], i64 16
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr float, ptr [[TMP4]], i64 32
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr float, ptr [[TMP4]], i64 48
+; CHECK-NEXT:    [[WIDE_LOAD6:%.*]] = load <16 x float>, ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD7:%.*]] = load <16 x float>, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD9:%.*]] = load <16 x float>, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP20]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP6]], <16 x float> [[WIDE_LOAD1]])
+; CHECK-NEXT:    [[TMP8:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP7]], <16 x float> [[WIDE_LOAD2]])
+; CHECK-NEXT:    [[TMP9:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP8]], <16 x float> [[WIDE_LOAD3]])
+; CHECK-NEXT:    [[TMP18]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP9]], <16 x float> [[WIDE_LOAD5]])
+; CHECK-NEXT:    [[TMP12:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP11]], <16 x float> [[WIDE_LOAD6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP12]], <16 x float> [[WIDE_LOAD7]])
+; CHECK-NEXT:    [[TMP21:%.*]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP13]], <16 x float> [[WIDE_LOAD9]])
+; CHECK-NEXT:    [[TMP19]] = call float @llvm.vector.reduce.fadd.v16f32(float [[TMP21]], <16 x float> [[WIDE_LOAD8]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-NEXT:    br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
+; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
+; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 16
+; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]]
+; CHECK:       [[VEC_EPILOG_PH]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX1:%.*]] = phi float [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX9:%.*]] = phi float [ [[TMP19]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT:    [[N_MOD_VF9:%.*]] = urem i64 [[BOUND]], 16
+; CHECK-NEXT:    [[N_VEC10:%.*]] = sub i64 [[BOUND]], [[N_MOD_VF9]]
+; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
+; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
+; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL1]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT17:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi float [ [[BC_MERGE_RDX1]], %[[VEC_EPILOG_PH]] ], [ [[TMP16:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX8:%.*]] = phi float [ [[BC_MERGE_RDX9]], %[[VEC_EPILOG_PH]] ], [ [[TMP17:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr float, ptr [[P]], i64 [[VEC_EPILOG_RESUME_VAL]]
+; CHECK-NEXT:    [[WIDE_LOAD11:%.*]] = load <16 x float>, ptr [[TMP14]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr float, ptr [[Q]], i64 [[VEC_EPILOG_RESUME_VAL]]
+; CHECK-NEXT:    [[WIDE_LOAD12:%.*]] = load <16 x float>, ptr [[TMP15]], align 4
+; CHECK-NEXT:    [[TMP16]] = call float @llvm.vector.reduce.fadd.v16f32(float [[BC_MERGE_RDX]], <16 x float> [[WIDE_LOAD11]])
+; CHECK-NEXT:    [[TMP17]] = call float @llvm.vector.reduce.fadd.v16f32(float [[BC_MERGE_RDX8]], <16 x float> [[WIDE_LOAD12]])
+; CHECK-NEXT:    [[INDEX_NEXT17]] = add nuw i64 [[VEC_EPILOG_RESUME_VAL]], 16
+; CHECK-NEXT:    br i1 true, label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N13:%.*]] = icmp eq i64 [[BOUND]], [[N_VEC10]]
+; CHECK-NEXT:    br i1 [[CMP_N13]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
+; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX19:%.*]] = phi float [ [[TMP16]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX20:%.*]] = phi float [ [[TMP17]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP19]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[ITER_CHECK]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED1:%.*]] = phi float [ [[BC_MERGE_RDX19]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD1:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[RED2:%.*]] = phi float [ [[BC_MERGE_RDX20]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[FADD2:%.*]], %[[LOOP]] ]
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr float, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD1:%.*]] = load float, ptr [[GEP1]], align 4
+; CHECK-NEXT:    [[FADD1]] = fadd float [[RED1]], [[LOAD1]]
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr float, ptr [[Q]], i64 [[IV]]
+; CHECK-NEXT:    [[LOAD2:%.*]] = load float, ptr [[GEP2]], align 4
+; CHECK-NEXT:    [[FADD2]] = fadd float [[RED2]], [[LOAD2]]
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[BOUND]]
+; CHECK-NEXT:    br i1 [[COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[R1:%.*]] = phi float [ [[FADD1]], %[[LOOP]] ], [ [[TMP18]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[R2:%.*]] = phi float [ [[FADD2]], %[[LOOP]] ], [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP17]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[RET:%.*]] = insertvalue { float, float } undef, float [[R1]], 0
+; CHECK-NEXT:    [[RET2:%.*]] = insertvalue { float, float } [[RET]], float [[R2]], 1
+; CHECK-NEXT:    ret { float, float } [[RET2]]
+;
+entry:
+  %bound = call i64 @llvm.umin.i64(i64 %n, i64 16)
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %red1 = phi float [ 0.0, %entry ], [ %fadd1, %loop ]
+  %red2 = phi float [ 1.0, %entry ], [ %fadd2, %loop ]
+  %gep1 = getelementptr float, ptr %p, i64 %iv
+  %load1 = load float, ptr %gep1, align 4
+  %fadd1 = fadd float %red1, %load1
+  %gep2 = getelementptr float, ptr %q, i64 %iv
+  %load2 = load float, ptr %gep2, align 4
+  %fadd2 = fadd float %red2, %load2
+  %iv.next = add nuw nsw i64 %iv, 1
+  %cond = icmp eq i64 %iv.next, %bound
+  br i1 %cond, label %exit, label %loop
+
+exit:
+  %r1 = phi float [ %fadd1, %loop ]
+  %r2 = phi float [ %fadd2, %loop ]
+  %ret = insertvalue { float, float } undef, float %r1, 0
+  %ret2 = insertvalue { float, float } %ret, float %r2, 1
+  ret { float, float } %ret2
+}
+

>From 30434bab1a85451b0a85fab94a835a7a40818ffa Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sun, 8 Mar 2026 00:00:09 +0800
Subject: [PATCH 10/33] [Reland][llubi] Add support for load/store/lifetime
 markers (#185196)

This patch relands https://github.com/llvm/llvm-project/pull/182532. The
original version causes test failures related to undef bits since it
incorrectly assumes `std::uniform_int_distribution` yields the same
results across different stdlib vendors. This patch simply uses low bits
to avoid the issue. I am not sure whether it still generates uniformly
distributed random numbers. But abseil also uses this trick:
https://github.com/abseil/abseil-cpp/blob/e72b94a2f257ba069ec0b99e557e9f1f6b9c1a3e/absl/random/uniform_int_distribution.h#L203-L206

I have confirmed all tests passed with libstdc++ and libc++.

Original PR description:
Bytes are adjusted to respect the incoming byte type proposed in
https://discourse.llvm.org/t/rfc-add-a-new-byte-type-to-llvm-ir/89522.

Note that the current implementation of constant folding doesn't handle
bitcasts with weird types like `<8 x i3> to <3 x i8>`:

https://github.com/llvm/llvm-project/blob/49de34459c99b33c27aae4596ed5e67ce3d89eae/llvm/lib/Analysis/ConstantFolding.cpp#L223-L232
So I take the result from Alive2 as reference:
https://alive2.llvm.org/ce/z/ZGA_xP
---
 llvm/docs/CommandGuide/llubi.rst              |  12 +
 llvm/test/tools/llubi/loadstore_be.ll         | 190 ++++++++++
 llvm/test/tools/llubi/loadstore_le.ll         | 192 ++++++++++
 llvm/test/tools/llubi/loadstore_misaligned.ll |  14 +
 llvm/test/tools/llubi/loadstore_null.ll       |  10 +
 llvm/test/tools/llubi/loadstore_oob1.ll       |  14 +
 .../test/tools/llubi/loadstore_overaligned.ll |  30 ++
 llvm/test/tools/llubi/loadstore_poison.ll     |  10 +
 llvm/test/tools/llubi/loadstore_uaf.ll        |  21 ++
 llvm/test/tools/llubi/store_dead.ll           |  18 +
 llvm/tools/llubi/lib/Context.cpp              | 337 ++++++++++++++++--
 llvm/tools/llubi/lib/Context.h                |  46 ++-
 llvm/tools/llubi/lib/Interpreter.cpp          | 135 ++++++-
 llvm/tools/llubi/lib/Value.h                  |  63 +++-
 llvm/tools/llubi/llubi.cpp                    |  23 +-
 15 files changed, 1050 insertions(+), 65 deletions(-)
 create mode 100644 llvm/test/tools/llubi/loadstore_be.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_le.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_misaligned.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_null.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_oob1.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_overaligned.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_poison.ll
 create mode 100644 llvm/test/tools/llubi/loadstore_uaf.ll
 create mode 100644 llvm/test/tools/llubi/store_dead.ll

diff --git a/llvm/docs/CommandGuide/llubi.rst b/llvm/docs/CommandGuide/llubi.rst
index f652af83d810a..55528373cffef 100644
--- a/llvm/docs/CommandGuide/llubi.rst
+++ b/llvm/docs/CommandGuide/llubi.rst
@@ -70,6 +70,18 @@ INTERPRETER OPTIONS
 
   Set the value of `llvm.vscale` to N. The default value is 4.
 
+.. option:: -seed=N
+
+  Set the seed for the random number generator to N. By default, the seed is 0.
+
+.. option:: -undef-behavior=mode
+
+  Set the behavior for undefined values (e.g., load from uninitialized memory or freeze a poison value).
+  The options for `mode` are:
+
+  * `nondet`: Each load from the same uninitialized byte yields a fresh random value. This is the default behavior.
+  * `zero`: Uninitialized values are treated as zero.
+
 EXIT STATUS
 -----------
 
diff --git a/llvm/test/tools/llubi/loadstore_be.ll b/llvm/test/tools/llubi/loadstore_be.ll
new file mode 100644
index 0000000000000..3dc03de73f303
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_be.ll
@@ -0,0 +1,190 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: llubi --verbose < %s 2>&1 | FileCheck %s
+
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+
+%struct = type { [2 x i16], i64 }
+%struct.packed = type <{ [2 x i16], i64 }>
+%struct.vscale = type { <vscale x 1 x i32>, <vscale x 1 x i32> }
+
+define void @main() {
+  %alloc = alloca i32
+  store i32 u0x00010203, ptr %alloc
+  %val1 = load i32, ptr %alloc
+  %val2 = load i32, ptr %alloc, align 2
+  %gep = getelementptr i8, ptr %alloc, i64 1
+  %val3 = load i8, ptr %gep
+  %val4 = load <4 x i8>, ptr %alloc
+
+  store i16 u0x0405, ptr %gep, align 1
+  %val5 = load <4 x i8>, ptr %alloc
+  store <2 x i16> <i16 u0x0607, i16 u0x0809>, ptr %alloc
+  %val6 = load <4 x i8>, ptr %alloc
+  %val7 = load <8 x i4>, ptr %alloc
+  store <3 x i3> <i3 1, i3 2, i3 3>, ptr %alloc
+  ; padding bits are undefined.
+  %val8 = load <16 x i1>, ptr %alloc
+  %val9 = load <16 x i1>, ptr %alloc
+  store <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7>, ptr %alloc
+  %val_bitcast = load <3 x i8>, ptr %alloc
+
+  ; For non-byte-sized types, the padding bits must be zero.
+  store i25 -1, ptr %alloc
+  %val10 = load <4 x i8>, ptr %alloc
+  store i8 -1, ptr %alloc
+  ; If the padding bits were not zero, load yields poison value.
+  %val11 = load i25, ptr %alloc
+
+  call void @llvm.lifetime.start.p0(ptr poison)
+  call void @llvm.lifetime.end.p0(ptr poison)
+  %alloc_lifetime = alloca i32
+  ; Load of a dead object yields poison value.
+  %val12 = load i32, ptr %alloc_lifetime
+  call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+  ; After lifetime.start, the object is alive but uninitialized.
+  %val13 = load i32, ptr %alloc_lifetime
+  %val14 = load i32, ptr %alloc_lifetime
+  store i32 77, ptr %alloc_lifetime
+  %val15 = load i32, ptr %alloc_lifetime
+  ; Calling lifetime.start on an alive object makes the contents uninitialized again.
+  call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+  %val_undef1 = load i32, ptr %alloc_lifetime
+  %val_undef2 = load i32, ptr %alloc_lifetime
+  call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+  ; Calling lifetime.end on an already dead object is a no-op.
+  call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+  ; Load of a dead object yields poison value.
+  %val16 = load i32, ptr %alloc_lifetime
+
+  store i32 u0xFFF80000, ptr %alloc
+  %val17 = load float, ptr %alloc
+
+  %alloc_vscale = alloca <vscale x 2 x i32>
+  %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0
+  %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %twos = add <vscale x 1 x i32> %ones, %ones
+  store <vscale x 1 x i32> %ones, ptr %alloc_vscale
+  %gep3 = getelementptr <vscale x 1 x i32>, ptr %alloc_vscale, i64 1
+  store <vscale x 1 x i32> %twos, ptr %gep3
+  %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale
+
+  %alloc_struct = alloca %struct
+  store %struct { [2 x i16] [i16 1, i16 2], i64 3 }, ptr %alloc_struct
+  %val19 = load %struct, ptr %alloc_struct
+  ; Padding bytes of struct are undefined.
+  %val20 = load i64, ptr %alloc_struct
+  %val21 = load i64, ptr %alloc_struct
+
+  %alloc_struct_packed = alloca %struct.packed
+  store %struct.packed <{ [2 x i16] [i16 1, i16 2], i64 3 }>, ptr %alloc_struct_packed
+  %val22 = load %struct.packed, ptr %alloc_struct_packed
+  ; No padding bytes.
+  %val23 = load i64, ptr %alloc_struct_packed
+  %val24 = load i64, ptr %alloc_struct_packed
+
+  %alloc_struct_vscale = alloca %struct.vscale
+  store %struct.vscale zeroinitializer, ptr %alloc_struct_vscale
+  %gep4 = getelementptr <vscale x 1 x i32>, ptr %alloc_struct_vscale, i32 1
+  store <vscale x 1 x i32> %ones, ptr %gep4
+  %val25 = load %struct.vscale, ptr %alloc_struct_vscale
+
+  %alloc_array = alloca [2 x i32]
+  store [2 x i32] [i32 1, i32 2], ptr %alloc_array
+  %val26 = load [2 x i32], ptr %alloc_array
+
+  %alloc_i1_vec = alloca <4 x i1>
+  store <4 x i1> <i1 1, i1 0, i1 poison, i1 0>, ptr %alloc_i1_vec
+  %val27 = load <4 x i1>, ptr %alloc_i1_vec
+  %val28 = load i8, ptr %alloc_i1_vec
+
+  %alloc_padding = alloca i31
+  store i32 0, ptr %alloc_padding
+
+  %alloc_padding_vec = alloca i64
+  store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1}, ptr %alloc_padding_vec
+  %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec
+  %load_vec = load <6 x i5>, ptr %alloc_padding_vec
+  %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec
+  %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec
+
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT:   %alloc = alloca i32, align 4 => ptr 0x8 [alloc]
+; CHECK-NEXT:   store i32 66051, ptr %alloc, align 4
+; CHECK-NEXT:   %val1 = load i32, ptr %alloc, align 4 => i32 66051
+; CHECK-NEXT:   %val2 = load i32, ptr %alloc, align 2 => i32 66051
+; CHECK-NEXT:   %gep = getelementptr i8, ptr %alloc, i64 1 => ptr 0x9 [alloc + 1]
+; CHECK-NEXT:   %val3 = load i8, ptr %gep, align 1 => i8 1
+; CHECK-NEXT:   %val4 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 2, i8 1, i8 0 }
+; CHECK-NEXT:   store i16 1029, ptr %gep, align 1
+; CHECK-NEXT:   %val5 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 5, i8 4, i8 0 }
+; CHECK-NEXT:   store <2 x i16> <i16 1543, i16 2057>, ptr %alloc, align 4
+; CHECK-NEXT:   %val6 = load <4 x i8>, ptr %alloc, align 4 => { i8 7, i8 6, i8 9, i8 8 }
+; CHECK-NEXT:   %val7 = load <8 x i4>, ptr %alloc, align 4 => { i4 0, i4 7, i4 0, i4 6, i4 0, i4 -7, i4 0, i4 -8 }
+; CHECK-NEXT:   store <3 x i3> <i3 1, i3 2, i3 3>, ptr %alloc, align 2
+; CHECK-NEXT:   %val8 = load <16 x i1>, ptr %alloc, align 2 => { F, F, F, F, F, F, F, F, F, T, F, T, F, F, T, T }
+; CHECK-NEXT:   %val9 = load <16 x i1>, ptr %alloc, align 2 => { F, F, F, F, F, F, F, F, F, T, F, T, F, F, T, T }
+; CHECK-NEXT:   store <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1>, ptr %alloc, align 4
+; CHECK-NEXT:   %val_bitcast = load <3 x i8>, ptr %alloc, align 4 => { i8 5, i8 57, i8 119 }
+; CHECK-NEXT:   store i25 -1, ptr %alloc, align 4
+; CHECK-NEXT:   %val10 = load <4 x i8>, ptr %alloc, align 4 => { i8 -1, i8 -1, i8 -1, i8 1 }
+; CHECK-NEXT:   store i8 -1, ptr %alloc, align 1
+; CHECK-NEXT:   %val11 = load i25, ptr %alloc, align 4 => poison
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr poison)
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr poison)
+; CHECK-NEXT:   %alloc_lifetime = alloca i32, align 4 => ptr 0xC [alloc_lifetime]
+; CHECK-NEXT:   %val12 = load i32, ptr %alloc_lifetime, align 4 => poison
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val13 = load i32, ptr %alloc_lifetime, align 4 => i32 -289830082
+; CHECK-NEXT:   %val14 = load i32, ptr %alloc_lifetime, align 4 => i32 -399511892
+; CHECK-NEXT:   store i32 77, ptr %alloc_lifetime, align 4
+; CHECK-NEXT:   %val15 = load i32, ptr %alloc_lifetime, align 4 => i32 77
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val_undef1 = load i32, ptr %alloc_lifetime, align 4 => i32 -132966042
+; CHECK-NEXT:   %val_undef2 = load i32, ptr %alloc_lifetime, align 4 => i32 1094190734
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
+; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => NaN
+; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
+; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
+; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
+; CHECK-NEXT:   %twos = add <vscale x 1 x i32> %ones, %ones => { i32 2, i32 2, i32 2, i32 2 }
+; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %alloc_vscale, align 4
+; CHECK-NEXT:   %gep3 = getelementptr <vscale x 1 x i32>, ptr %alloc_vscale, i64 1 => ptr 0x20 [alloc_vscale + 16]
+; CHECK-NEXT:   store <vscale x 1 x i32> %twos, ptr %gep3, align 4
+; CHECK-NEXT:   %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale, align 8 => { i32 2, i32 2, i32 2, i32 2, i32 1, i32 1, i32 1, i32 1 }
+; CHECK-NEXT:   %alloc_struct = alloca %struct, align 8 => ptr 0x30 [alloc_struct]
+; CHECK-NEXT:   store %struct { [2 x i16] [i16 1, i16 2], i64 3 }, ptr %alloc_struct, align 8
+; CHECK-NEXT:   %val19 = load %struct, ptr %alloc_struct, align 8 => { { i16 1, i16 2 }, i64 3 }
+; CHECK-NEXT:   %val20 = load i64, ptr %alloc_struct, align 8 => i64 281486191577587
+; CHECK-NEXT:   %val21 = load i64, ptr %alloc_struct, align 8 => i64 281486962127119
+; CHECK-NEXT:   %alloc_struct_packed = alloca %struct.packed, align 8 => ptr 0x40 [alloc_struct_packed]
+; CHECK-NEXT:   store %struct.packed <{ [2 x i16] [i16 1, i16 2], i64 3 }>, ptr %alloc_struct_packed, align 1
+; CHECK-NEXT:   %val22 = load %struct.packed, ptr %alloc_struct_packed, align 1 => { { i16 1, i16 2 }, i64 3 }
+; CHECK-NEXT:   %val23 = load i64, ptr %alloc_struct_packed, align 8 => i64 281483566645248
+; CHECK-NEXT:   %val24 = load i64, ptr %alloc_struct_packed, align 8 => i64 281483566645248
+; CHECK-NEXT:   %alloc_struct_vscale = alloca %struct.vscale, align 8 => ptr 0x50 [alloc_struct_vscale]
+; CHECK-NEXT:   store %struct.vscale zeroinitializer, ptr %alloc_struct_vscale, align 4
+; CHECK-NEXT:   %gep4 = getelementptr <vscale x 1 x i32>, ptr %alloc_struct_vscale, i32 1 => ptr 0x60 [alloc_struct_vscale + 16]
+; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %gep4, align 4
+; CHECK-NEXT:   %val25 = load %struct.vscale, ptr %alloc_struct_vscale, align 4 => { { i32 0, i32 0, i32 0, i32 0 }, { i32 1, i32 1, i32 1, i32 1 } }
+; CHECK-NEXT:   %alloc_array = alloca [2 x i32], align 4 => ptr 0x70 [alloc_array]
+; CHECK-NEXT:   store [2 x i32] [i32 1, i32 2], ptr %alloc_array, align 4
+; CHECK-NEXT:   %val26 = load [2 x i32], ptr %alloc_array, align 4 => { i32 1, i32 2 }
+; CHECK-NEXT:   %alloc_i1_vec = alloca <4 x i1>, align 1 => ptr 0x78 [alloc_i1_vec]
+; CHECK-NEXT:   store <4 x i1> <i1 true, i1 false, i1 poison, i1 false>, ptr %alloc_i1_vec, align 1
+; CHECK-NEXT:   %val27 = load <4 x i1>, ptr %alloc_i1_vec, align 1 => { F, F, F, F }
+; CHECK-NEXT:   %val28 = load i8, ptr %alloc_i1_vec, align 1 => poison
+; CHECK-NEXT:   %alloc_padding = alloca i31, align 4 => ptr 0x7C [alloc_padding]
+; CHECK-NEXT:   store i32 0, ptr %alloc_padding, align 4
+; CHECK-NEXT:   %alloc_padding_vec = alloca i64, align 8 => ptr 0x80 [alloc_padding_vec]
+; CHECK-NEXT:   store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1 }, ptr %alloc_padding_vec, align 4
+; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }, i32 -1 }
+; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }
+; CHECK-NEXT:   %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec, align 8 => i33 255
+; CHECK-NEXT:   %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec, align 8 => { i11 255, i11 0, i11 0 }
+; CHECK-NEXT:   ret void
+; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_le.ll b/llvm/test/tools/llubi/loadstore_le.ll
new file mode 100644
index 0000000000000..35e1cd425e72c
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_le.ll
@@ -0,0 +1,192 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: llubi --verbose < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+
+%struct = type { [2 x i16], i64 }
+%struct.packed = type <{ [2 x i16], i64 }>
+%struct.vscale = type { <vscale x 1 x i32>, <vscale x 1 x i32> }
+
+define void @main() {
+  %alloc = alloca i32
+  store i32 u0x00010203, ptr %alloc
+  %val1 = load i32, ptr %alloc
+  %val2 = load i32, ptr %alloc, align 2
+  %gep = getelementptr i8, ptr %alloc, i64 1
+  %val3 = load i8, ptr %gep
+  %val4 = load <4 x i8>, ptr %alloc
+
+  store i16 u0x0405, ptr %gep, align 1
+  %val5 = load <4 x i8>, ptr %alloc
+  store <2 x i16> <i16 u0x0607, i16 u0x0809>, ptr %alloc
+  %val6 = load <4 x i8>, ptr %alloc
+  %val7 = load <8 x i4>, ptr %alloc
+  store <3 x i3> <i3 1, i3 2, i3 3>, ptr %alloc
+  ; padding bits are undefined.
+  %val8 = load <16 x i1>, ptr %alloc
+  %val9 = load <16 x i1>, ptr %alloc
+  store <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7>, ptr %alloc
+  %val_bitcast = load <3 x i8>, ptr %alloc
+
+  ; For non-byte-sized types, the padding bits must be zero.
+  store i25 -1, ptr %alloc
+  %val10 = load <4 x i8>, ptr %alloc
+  %gep2 = getelementptr i8, ptr %alloc, i64 3
+  store i8 -1, ptr %gep2
+  ; If the padding bits were not zero, load yields poison value.
+  %val11 = load i25, ptr %alloc
+
+  call void @llvm.lifetime.start.p0(ptr poison)
+  call void @llvm.lifetime.end.p0(ptr poison)
+  %alloc_lifetime = alloca i32
+  ; Load of a dead object yields poison value.
+  %val12 = load i32, ptr %alloc_lifetime
+  call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+  ; After lifetime.start, the object is alive but uninitialized.
+  %val13 = load i32, ptr %alloc_lifetime
+  %val14 = load i32, ptr %alloc_lifetime
+  store i32 77, ptr %alloc_lifetime
+  %val15 = load i32, ptr %alloc_lifetime
+  ; Calling lifetime.start on an alive object makes the contents uninitialized again.
+  call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+  %val_undef1 = load i32, ptr %alloc_lifetime
+  %val_undef2 = load i32, ptr %alloc_lifetime
+  call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+  ; Calling lifetime.end on an already dead object is a no-op.
+  call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+  ; Load of a dead object yields poison value.
+  %val16 = load i32, ptr %alloc_lifetime
+
+  store i32 u0xFFF80000, ptr %alloc
+  %val17 = load float, ptr %alloc
+
+  %alloc_vscale = alloca <vscale x 2 x i32>
+  %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0
+  %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %twos = add <vscale x 1 x i32> %ones, %ones
+  store <vscale x 1 x i32> %ones, ptr %alloc_vscale
+  %gep3 = getelementptr <vscale x 1 x i32>, ptr %alloc_vscale, i64 1
+  store <vscale x 1 x i32> %twos, ptr %gep3
+  %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale
+
+  %alloc_struct = alloca %struct
+  store %struct { [2 x i16] [i16 1, i16 2], i64 3 }, ptr %alloc_struct
+  %val19 = load %struct, ptr %alloc_struct
+  ; Padding bytes of struct are undefined.
+  %val20 = load i64, ptr %alloc_struct
+  %val21 = load i64, ptr %alloc_struct
+
+  %alloc_struct_packed = alloca %struct.packed
+  store %struct.packed <{ [2 x i16] [i16 1, i16 2], i64 3 }>, ptr %alloc_struct_packed
+  %val22 = load %struct.packed, ptr %alloc_struct_packed
+  ; No padding bytes.
+  %val23 = load i64, ptr %alloc_struct_packed
+  %val24 = load i64, ptr %alloc_struct_packed
+
+  %alloc_struct_vscale = alloca %struct.vscale
+  store %struct.vscale zeroinitializer, ptr %alloc_struct_vscale
+  %gep4 = getelementptr <vscale x 1 x i32>, ptr %alloc_struct_vscale, i32 1
+  store <vscale x 1 x i32> %ones, ptr %gep4
+  %val25 = load %struct.vscale, ptr %alloc_struct_vscale
+
+  %alloc_array = alloca [2 x i32]
+  store [2 x i32] [i32 1, i32 2], ptr %alloc_array
+  %val26 = load [2 x i32], ptr %alloc_array
+
+  %alloc_i1_vec = alloca <4 x i1>
+  store <4 x i1> <i1 1, i1 0, i1 poison, i1 0>, ptr %alloc_i1_vec
+  %val27 = load <4 x i1>, ptr %alloc_i1_vec
+  %val28 = load i8, ptr %alloc_i1_vec
+
+  %alloc_padding = alloca i31
+  store i32 0, ptr %alloc_padding
+
+  %alloc_padding_vec = alloca i64
+  store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1}, ptr %alloc_padding_vec
+  %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec
+  %load_vec = load <6 x i5>, ptr %alloc_padding_vec
+  %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec
+  %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec
+
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT:   %alloc = alloca i32, align 4 => ptr 0x8 [alloc]
+; CHECK-NEXT:   store i32 66051, ptr %alloc, align 4
+; CHECK-NEXT:   %val1 = load i32, ptr %alloc, align 4 => i32 66051
+; CHECK-NEXT:   %val2 = load i32, ptr %alloc, align 2 => i32 66051
+; CHECK-NEXT:   %gep = getelementptr i8, ptr %alloc, i64 1 => ptr 0x9 [alloc + 1]
+; CHECK-NEXT:   %val3 = load i8, ptr %gep, align 1 => i8 2
+; CHECK-NEXT:   %val4 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 2, i8 1, i8 0 }
+; CHECK-NEXT:   store i16 1029, ptr %gep, align 1
+; CHECK-NEXT:   %val5 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 5, i8 4, i8 0 }
+; CHECK-NEXT:   store <2 x i16> <i16 1543, i16 2057>, ptr %alloc, align 4
+; CHECK-NEXT:   %val6 = load <4 x i8>, ptr %alloc, align 4 => { i8 7, i8 6, i8 9, i8 8 }
+; CHECK-NEXT:   %val7 = load <8 x i4>, ptr %alloc, align 4 => { i4 7, i4 0, i4 6, i4 0, i4 -7, i4 0, i4 -8, i4 0 }
+; CHECK-NEXT:   store <3 x i3> <i3 1, i3 2, i3 3>, ptr %alloc, align 2
+; CHECK-NEXT:   %val8 = load <16 x i1>, ptr %alloc, align 2 => { T, F, F, F, T, F, T, T, F, F, F, F, F, F, F, F }
+; CHECK-NEXT:   %val9 = load <16 x i1>, ptr %alloc, align 2 => { T, F, F, F, T, F, T, T, F, F, F, F, F, F, F, F }
+; CHECK-NEXT:   store <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1>, ptr %alloc, align 4
+; CHECK-NEXT:   %val_bitcast = load <3 x i8>, ptr %alloc, align 4 => { i8 -120, i8 -58, i8 -6 }
+; CHECK-NEXT:   store i25 -1, ptr %alloc, align 4
+; CHECK-NEXT:   %val10 = load <4 x i8>, ptr %alloc, align 4 => { i8 -1, i8 -1, i8 -1, i8 1 }
+; CHECK-NEXT:   %gep2 = getelementptr i8, ptr %alloc, i64 3 => ptr 0xB [alloc + 3]
+; CHECK-NEXT:   store i8 -1, ptr %gep2, align 1
+; CHECK-NEXT:   %val11 = load i25, ptr %alloc, align 4 => poison
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr poison)
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr poison)
+; CHECK-NEXT:   %alloc_lifetime = alloca i32, align 4 => ptr 0xC [alloc_lifetime]
+; CHECK-NEXT:   %val12 = load i32, ptr %alloc_lifetime, align 4 => poison
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val13 = load i32, ptr %alloc_lifetime, align 4 => i32 -289830082
+; CHECK-NEXT:   %val14 = load i32, ptr %alloc_lifetime, align 4 => i32 -399511892
+; CHECK-NEXT:   store i32 77, ptr %alloc_lifetime, align 4
+; CHECK-NEXT:   %val15 = load i32, ptr %alloc_lifetime, align 4 => i32 77
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val_undef1 = load i32, ptr %alloc_lifetime, align 4 => i32 -132966042
+; CHECK-NEXT:   %val_undef2 = load i32, ptr %alloc_lifetime, align 4 => i32 1094190734
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
+; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
+; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => NaN
+; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
+; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
+; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
+; CHECK-NEXT:   %twos = add <vscale x 1 x i32> %ones, %ones => { i32 2, i32 2, i32 2, i32 2 }
+; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %alloc_vscale, align 4
+; CHECK-NEXT:   %gep3 = getelementptr <vscale x 1 x i32>, ptr %alloc_vscale, i64 1 => ptr 0x20 [alloc_vscale + 16]
+; CHECK-NEXT:   store <vscale x 1 x i32> %twos, ptr %gep3, align 4
+; CHECK-NEXT:   %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale, align 8 => { i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2 }
+; CHECK-NEXT:   %alloc_struct = alloca %struct, align 8 => ptr 0x30 [alloc_struct]
+; CHECK-NEXT:   store %struct { [2 x i16] [i16 1, i16 2], i64 3 }, ptr %alloc_struct, align 8
+; CHECK-NEXT:   %val19 = load %struct, ptr %alloc_struct, align 8 => { { i16 1, i16 2 }, i64 3 }
+; CHECK-NEXT:   %val20 = load i64, ptr %alloc_struct, align 8 => i64 -7172745523491635199
+; CHECK-NEXT:   %val21 = load i64, ptr %alloc_struct, align 8 => i64 -3863260483603529727
+; CHECK-NEXT:   %alloc_struct_packed = alloca %struct.packed, align 8 => ptr 0x40 [alloc_struct_packed]
+; CHECK-NEXT:   store %struct.packed <{ [2 x i16] [i16 1, i16 2], i64 3 }>, ptr %alloc_struct_packed, align 1
+; CHECK-NEXT:   %val22 = load %struct.packed, ptr %alloc_struct_packed, align 1 => { { i16 1, i16 2 }, i64 3 }
+; CHECK-NEXT:   %val23 = load i64, ptr %alloc_struct_packed, align 8 => i64 12885032961
+; CHECK-NEXT:   %val24 = load i64, ptr %alloc_struct_packed, align 8 => i64 12885032961
+; CHECK-NEXT:   %alloc_struct_vscale = alloca %struct.vscale, align 8 => ptr 0x50 [alloc_struct_vscale]
+; CHECK-NEXT:   store %struct.vscale zeroinitializer, ptr %alloc_struct_vscale, align 4
+; CHECK-NEXT:   %gep4 = getelementptr <vscale x 1 x i32>, ptr %alloc_struct_vscale, i32 1 => ptr 0x60 [alloc_struct_vscale + 16]
+; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %gep4, align 4
+; CHECK-NEXT:   %val25 = load %struct.vscale, ptr %alloc_struct_vscale, align 4 => { { i32 0, i32 0, i32 0, i32 0 }, { i32 1, i32 1, i32 1, i32 1 } }
+; CHECK-NEXT:   %alloc_array = alloca [2 x i32], align 4 => ptr 0x70 [alloc_array]
+; CHECK-NEXT:   store [2 x i32] [i32 1, i32 2], ptr %alloc_array, align 4
+; CHECK-NEXT:   %val26 = load [2 x i32], ptr %alloc_array, align 4 => { i32 1, i32 2 }
+; CHECK-NEXT:   %alloc_i1_vec = alloca <4 x i1>, align 1 => ptr 0x78 [alloc_i1_vec]
+; CHECK-NEXT:   store <4 x i1> <i1 true, i1 false, i1 poison, i1 false>, ptr %alloc_i1_vec, align 1
+; CHECK-NEXT:   %val27 = load <4 x i1>, ptr %alloc_i1_vec, align 1 => { T, F, poison, F }
+; CHECK-NEXT:   %val28 = load i8, ptr %alloc_i1_vec, align 1 => poison
+; CHECK-NEXT:   %alloc_padding = alloca i31, align 4 => ptr 0x7C [alloc_padding]
+; CHECK-NEXT:   store i32 0, ptr %alloc_padding, align 4
+; CHECK-NEXT:   %alloc_padding_vec = alloca i64, align 8 => ptr 0x80 [alloc_padding_vec]
+; CHECK-NEXT:   store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1 }, ptr %alloc_padding_vec, align 4
+; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }, i32 -1 }
+; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }
+; CHECK-NEXT:   %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec, align 8 => poison
+; CHECK-NEXT:   %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec, align 8 => { poison, poison, poison }
+; CHECK-NEXT:   ret void
+; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_misaligned.ll b/llvm/test/tools/llubi/loadstore_misaligned.ll
new file mode 100644
index 0000000000000..cc80639d773bd
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_misaligned.ll
@@ -0,0 +1,14 @@
+; RUN: sed 's/OP1/store i32 0/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+; RUN: sed 's/OP1/%res = load i32/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+
+define void @main() {
+  %alloc = alloca [2 x i32], align 8
+  %gep = getelementptr inbounds [2 x i32], ptr %alloc, i64 0, i64 1
+  OP1, ptr %gep, align 8
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT: %alloc = alloca [2 x i32], align 8 => ptr 0x8 [alloc]
+; CHECK-NEXT: %gep = getelementptr inbounds [2 x i32], ptr %alloc, i64 0, i64 1 => ptr 0xC [alloc + 4]
+; CHECK-NEXT: Immediate UB detected: Misaligned memory access.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/test/tools/llubi/loadstore_null.ll b/llvm/test/tools/llubi/loadstore_null.ll
new file mode 100644
index 0000000000000..7a243784e6bd6
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_null.ll
@@ -0,0 +1,10 @@
+; RUN: sed 's/OP1/store i32 0/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+; RUN: sed 's/OP1/%res = load i32/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+
+define void @main() {
+  OP1, ptr null
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT: Immediate UB detected: Invalid memory access via a pointer with nullary provenance.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/test/tools/llubi/loadstore_oob1.ll b/llvm/test/tools/llubi/loadstore_oob1.ll
new file mode 100644
index 0000000000000..0618faa96bfde
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_oob1.ll
@@ -0,0 +1,14 @@
+; RUN: sed 's/OP1/store i32 0/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+; RUN: sed 's/OP1/%res = load i32/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+
+define void @main() {
+  %alloc = alloca [2 x i32]
+  %gep = getelementptr inbounds [2 x i32], ptr %alloc, i64 0, i64 2
+  OP1, ptr %gep
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT: %alloc = alloca [2 x i32], align 4 => ptr 0x8 [alloc]
+; CHECK-NEXT: %gep = getelementptr inbounds [2 x i32], ptr %alloc, i64 0, i64 2 => ptr 0x10 [alloc + 8]
+; CHECK-NEXT: Immediate UB detected: Memory access is out of bounds.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/test/tools/llubi/loadstore_overaligned.ll b/llvm/test/tools/llubi/loadstore_overaligned.ll
new file mode 100644
index 0000000000000..5933ec4399fbb
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_overaligned.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: llubi --verbose < %s 2>&1 | FileCheck %s
+
+; For over-aligned arrays, the padding bytes between elements should be filled with undef.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:64:64-i64:64:64"
+
+define void @main() {
+  %alloca = alloca [2 x i32]
+  store <4 x i32> zeroinitializer, ptr %alloca, align 8
+  store [2 x i32] [i32 1, i32 1], ptr %alloca, align 8
+  ; The padding bytes in the middle are filled with undef,
+  ; while the tail padding bytes are still zero-initialized.
+  %load1 = load <4 x i32>, ptr %alloca, align 8
+  %load2 = load <4 x i32>, ptr %alloca, align 8
+  %load3 = load <4 x i32>, ptr %alloca, align 8
+  %load_arr = load [2 x i32], ptr %alloca, align 8
+  ret void
+}
+
+; CHECK: Entering function: main
+; CHECK-NEXT:   %alloca = alloca [2 x i32], align 8 => ptr 0x8 [alloca]
+; CHECK-NEXT:   store <4 x i32> zeroinitializer, ptr %alloca, align 8
+; CHECK-NEXT:   store [2 x i32] [i32 1, i32 1], ptr %alloca, align 8
+; CHECK-NEXT:   %load1 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -289830082, i32 1, i32 0 }
+; CHECK-NEXT:   %load2 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -399511892, i32 1, i32 0 }
+; CHECK-NEXT:   %load3 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -132966042, i32 1, i32 0 }
+; CHECK-NEXT:   %load_arr = load [2 x i32], ptr %alloca, align 8 => { i32 1, i32 1 }
+; CHECK-NEXT:   ret void
+; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_poison.ll b/llvm/test/tools/llubi/loadstore_poison.ll
new file mode 100644
index 0000000000000..44c72aa803709
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_poison.ll
@@ -0,0 +1,10 @@
+; RUN: sed 's/OP1/store i32 0/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+; RUN: sed 's/OP1/%res = load i32/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+
+define void @main() {
+  OP1, ptr poison
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT: Immediate UB detected: Invalid memory access with a poison pointer.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/test/tools/llubi/loadstore_uaf.ll b/llvm/test/tools/llubi/loadstore_uaf.ll
new file mode 100644
index 0000000000000..21b86552275b6
--- /dev/null
+++ b/llvm/test/tools/llubi/loadstore_uaf.ll
@@ -0,0 +1,21 @@
+; RUN: sed 's/OP1/store i32 0/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+; RUN: sed 's/OP1/%res = load i32/g' %s | not llubi --verbose 2>&1 | FileCheck %s
+
+define ptr @stack_object() {
+  %alloc = alloca i32
+  ret ptr %alloc
+}
+
+define void @main() {
+  %alloc = call ptr @stack_object()
+  OP1, ptr %alloc
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT: Entering function: stack_object
+; CHECK-NEXT:   %alloc = alloca i32, align 4 => ptr 0x8 [alloc]
+; CHECK-NEXT:   ret ptr %alloc
+; CHECK-NEXT: Exiting function: stack_object
+; CHECK-NEXT:   %alloc = call ptr @stack_object() => ptr 0x8 [dangling]
+; CHECK-NEXT: Immediate UB detected: Try to access a dead memory object.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/test/tools/llubi/store_dead.ll b/llvm/test/tools/llubi/store_dead.ll
new file mode 100644
index 0000000000000..8bd15beefb8d3
--- /dev/null
+++ b/llvm/test/tools/llubi/store_dead.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: not llubi --verbose < %s 2>&1 | FileCheck %s
+
+define void @main() {
+  %alloc = alloca i32
+  call void @llvm.lifetime.start.p0(ptr %alloc)
+  store i32 0, ptr %alloc
+  call void @llvm.lifetime.end.p0(ptr %alloc)
+  store i32 0, ptr %alloc
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT:   %alloc = alloca i32, align 4 => ptr 0x8 [alloc]
+; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr %alloc)
+; CHECK-NEXT:   store i32 0, ptr %alloc, align 4
+; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc)
+; CHECK-NEXT: Immediate UB detected: Try to access a dead memory object.
+; CHECK-NEXT: error: Execution of function 'main' failed.
diff --git a/llvm/tools/llubi/lib/Context.cpp b/llvm/tools/llubi/lib/Context.cpp
index adedfbcc3886b..0ff685c05643c 100644
--- a/llvm/tools/llubi/lib/Context.cpp
+++ b/llvm/tools/llubi/lib/Context.cpp
@@ -103,6 +103,300 @@ const AnyValue &Context::getConstantValue(Constant *C) {
   return ConstCache.emplace(C, getConstantValueImpl(C)).first->second;
 }
 
+AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty,
+                            uint32_t &OffsetInBits, bool CheckPaddingBits) {
+  if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy()) {
+    uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
+    uint32_t NewOffsetInBits = OffsetInBits + NumBits;
+    if (CheckPaddingBits)
+      NewOffsetInBits = alignTo(NewOffsetInBits, 8);
+    bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
+    uint32_t NumBitsToExtract = NewOffsetInBits - OffsetInBits;
+    SmallVector<uint64_t> RawBits(alignTo(NumBitsToExtract, 8));
+    for (uint32_t I = 0; I < NumBitsToExtract; I += 8) {
+      // Try to form a 'logical' byte that represents the bits in the range
+      // [BitsStart, BitsEnd].
+      uint32_t NumBitsInByte = std::min(8U, NumBitsToExtract - I);
+      uint32_t BitsStart =
+          OffsetInBits +
+          (DL.isLittleEndian() ? I : (NumBitsToExtract - NumBitsInByte - I));
+      uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
+      Byte LogicalByte;
+      // Check whether it is a cross-byte access.
+      if (((BitsStart ^ BitsEnd) & ~7) == 0)
+        LogicalByte = Bytes[BitsStart / 8].lshr(BitsStart % 8);
+      else
+        LogicalByte =
+            Byte::fshr(Bytes[BitsStart / 8], Bytes[BitsEnd / 8], BitsStart % 8);
+
+      uint32_t Mask = (1U << NumBitsInByte) - 1;
+      // If any of the bits in the byte is poison, the whole value is poison.
+      if (~LogicalByte.ConcreteMask & ~LogicalByte.Value & Mask) {
+        OffsetInBits = NewOffsetInBits;
+        return AnyValue::poison();
+      }
+      uint8_t RandomBits = 0;
+      if (UndefBehavior == UndefValueBehavior::NonDeterministic &&
+          (~LogicalByte.ConcreteMask & Mask)) {
+        // This byte contains undef bits.
+        // We don't use std::uniform_int_distribution here because it produces
+        // different results across different library implementations. Instead,
+        // we directly use the low bits from Rng.
+        RandomBits = static_cast<uint8_t>(Rng());
+      }
+      uint8_t ActualBits = ((LogicalByte.Value & LogicalByte.ConcreteMask) |
+                            (RandomBits & ~LogicalByte.ConcreteMask)) &
+                           Mask;
+      RawBits[I / 64] |= static_cast<APInt::WordType>(ActualBits) << (I % 64);
+    }
+    OffsetInBits = NewOffsetInBits;
+
+    APInt Bits(NumBitsToExtract, RawBits);
+
+    // Padding bits for non-byte-sized scalar types must be zero.
+    if (NeedsPadding) {
+      if (!Bits.isIntN(NumBits))
+        return AnyValue::poison();
+      Bits = Bits.trunc(NumBits);
+    }
+
+    if (Ty->isIntegerTy())
+      return Bits;
+    if (Ty->isFloatingPointTy())
+      return APFloat(Ty->getFltSemantics(), Bits);
+    assert(Ty->isPointerTy() && "Expect a pointer type");
+    // TODO: recover provenance
+    return Pointer(Bits);
+  }
+
+  assert(OffsetInBits % 8 == 0 && "Missing padding bits.");
+  if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
+    Type *ElemTy = VecTy->getElementType();
+    uint32_t ElemBits = DL.getTypeSizeInBits(ElemTy).getFixedValue();
+    uint32_t NumElements = getEVL(VecTy->getElementCount());
+    // Check padding bits. <N x iM> acts as if an integer type with N * M bits.
+    uint32_t NewOffsetInBits = OffsetInBits + ElemBits * NumElements;
+    uint32_t AlignedNewOffsetInBits = alignTo(NewOffsetInBits, 8);
+    if (NewOffsetInBits != AlignedNewOffsetInBits) {
+      assert(NewOffsetInBits % 8 != 0 &&
+             AlignedNewOffsetInBits - NewOffsetInBits < 8 &&
+             "Unexpected offset.");
+      // The padding bits are located in the last byte on little-endian systems.
+      // On big-endian systems, the padding bits are located in the first byte.
+      const Byte &PaddingByte =
+          Bytes[(DL.isBigEndian() ? OffsetInBits : NewOffsetInBits) / 8];
+      uint32_t Mask = (~0U << (NewOffsetInBits % 8)) & 255U;
+      // Make sure all high padding bits are zero.
+      if ((PaddingByte.ConcreteMask & ~PaddingByte.Value & Mask) != Mask) {
+        OffsetInBits = AlignedNewOffsetInBits;
+        return AnyValue::getPoisonValue(*this, Ty);
+      }
+      if (DL.isBigEndian())
+        OffsetInBits += AlignedNewOffsetInBits - NewOffsetInBits;
+    }
+
+    std::vector<AnyValue> ValVec;
+    ValVec.reserve(NumElements);
+    for (uint32_t I = 0; I != NumElements; ++I)
+      ValVec.push_back(
+          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/false));
+    if (DL.isBigEndian())
+      std::reverse(ValVec.begin(), ValVec.end());
+    OffsetInBits = AlignedNewOffsetInBits;
+    return AnyValue(std::move(ValVec));
+  }
+  if (auto *ArrTy = dyn_cast<ArrayType>(Ty)) {
+    Type *ElemTy = ArrTy->getElementType();
+    uint32_t StrideInBits = getEffectiveTypeAllocSize(ElemTy) * 8;
+    std::vector<AnyValue> ValVec;
+    uint32_t NumElements = ArrTy->getNumElements();
+    ValVec.reserve(NumElements);
+    uint32_t BaseOffsetInBits = OffsetInBits;
+    for (uint32_t I = 0; I != NumElements; ++I) {
+      OffsetInBits = BaseOffsetInBits + I * StrideInBits;
+      ValVec.push_back(
+          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/true));
+    }
+    return AnyValue(std::move(ValVec));
+  }
+  if (auto *StructTy = dyn_cast<StructType>(Ty)) {
+    auto *Layout = DL.getStructLayout(StructTy);
+    uint32_t BaseOffsetInBits = OffsetInBits;
+    std::vector<AnyValue> ValVec;
+    uint32_t NumElements = StructTy->getNumElements();
+    ValVec.reserve(NumElements);
+    for (uint32_t I = 0; I != NumElements; ++I) {
+      Type *ElemTy = StructTy->getElementType(I);
+      TypeSize ElemOffset = Layout->getElementOffset(I);
+      OffsetInBits = BaseOffsetInBits + getEffectiveTypeSize(ElemOffset) * 8;
+      ValVec.push_back(
+          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/true));
+    }
+    OffsetInBits =
+        BaseOffsetInBits +
+        static_cast<uint32_t>(getEffectiveTypeStoreSize(StructTy)) * 8;
+    return AnyValue(std::move(ValVec));
+  }
+  llvm_unreachable("Unsupported first class type.");
+}
+
+void Context::toBytes(const AnyValue &Val, Type *Ty, uint32_t &OffsetInBits,
+                      MutableArrayRef<Byte> Bytes, bool PaddingBits) {
+  if (Val.isPoison() || Ty->isIntegerTy() || Ty->isFloatingPointTy() ||
+      Ty->isPointerTy()) {
+    uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
+    uint32_t NewOffsetInBits = OffsetInBits + NumBits;
+    if (PaddingBits)
+      NewOffsetInBits = alignTo(NewOffsetInBits, 8);
+    bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
+    auto WriteBits = [&](const APInt &Bits) {
+      for (uint32_t I = 0, E = Bits.getBitWidth(); I < E; I += 8) {
+        uint32_t NumBitsInByte = std::min(8U, E - I);
+        uint32_t BitsStart =
+            OffsetInBits + (DL.isLittleEndian() ? I : (E - NumBitsInByte - I));
+        uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
+        uint8_t BitsVal =
+            static_cast<uint8_t>(Bits.extractBitsAsZExtValue(NumBitsInByte, I));
+
+        Bytes[BitsStart / 8].writeBits(
+            static_cast<uint8_t>(((1U << NumBitsInByte) - 1)
+                                 << (BitsStart % 8)),
+            static_cast<uint8_t>(BitsVal << (BitsStart % 8)));
+        // If it is a cross-byte access, write the remaining bits to the next
+        // byte.
+        if (((BitsStart ^ BitsEnd) & ~7) != 0)
+          Bytes[BitsEnd / 8].writeBits(
+              static_cast<uint8_t>((1U << (BitsEnd % 8 + 1)) - 1),
+              static_cast<uint8_t>(BitsVal >> (8 - (BitsStart % 8))));
+      }
+    };
+    if (Val.isPoison()) {
+      for (uint32_t I = 0, E = NewOffsetInBits - OffsetInBits; I < E;) {
+        uint32_t NumBitsInByte = std::min(8 - (OffsetInBits + I) % 8, E - I);
+        assert(((OffsetInBits ^ (OffsetInBits + NumBitsInByte - 1)) & ~7) ==
+                   0 &&
+               "Across byte boundary.");
+        Bytes[(OffsetInBits + I) / 8].poisonBits(static_cast<uint8_t>(
+            ((1U << NumBitsInByte) - 1) << ((OffsetInBits + I) % 8)));
+        I += NumBitsInByte;
+      }
+    } else if (Ty->isIntegerTy()) {
+      auto &Bits = Val.asInteger();
+      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
+                             : Bits);
+    } else if (Ty->isFloatingPointTy()) {
+      auto Bits = Val.asFloat().bitcastToAPInt();
+      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
+                             : Bits);
+    } else if (Ty->isPointerTy()) {
+      auto &Bits = Val.asPointer().address();
+      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
+                             : Bits);
+      // TODO: save metadata of the pointer.
+    } else {
+      llvm_unreachable("Unsupported scalar type.");
+    }
+    OffsetInBits = NewOffsetInBits;
+    return;
+  }
+
+  assert(OffsetInBits % 8 == 0 && "Missing padding bits.");
+  if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
+    Type *ElemTy = VecTy->getElementType();
+    auto &ValVec = Val.asAggregate();
+    uint32_t NewOffsetInBits =
+        alignTo(OffsetInBits + DL.getTypeSizeInBits(ElemTy).getFixedValue() *
+                                   ValVec.size(),
+                8);
+    if (DL.isLittleEndian()) {
+      for (const auto &SubVal : ValVec)
+        toBytes(SubVal, ElemTy, OffsetInBits, Bytes,
+                /*PaddingBits=*/false);
+    } else {
+      for (const auto &SubVal : reverse(ValVec))
+        toBytes(SubVal, ElemTy, OffsetInBits, Bytes,
+                /*PaddingBits=*/false);
+    }
+    if (NewOffsetInBits != OffsetInBits) {
+      assert(OffsetInBits % 8 != 0 && NewOffsetInBits - OffsetInBits < 8 &&
+             "Unexpected offset.");
+      // Fill remaining bits with zero.
+      Bytes[OffsetInBits / 8].zeroBits(
+          static_cast<uint8_t>(~0U << (OffsetInBits % 8)));
+    }
+    OffsetInBits = NewOffsetInBits;
+    return;
+  }
+  auto FillUndefBytes = [&](uint32_t NewOffsetInBits) {
+    if (OffsetInBits == NewOffsetInBits)
+      return;
+    // Fill padding bits due to alignment requirement.
+    assert(NewOffsetInBits > OffsetInBits &&
+           "Unexpected negative padding bits!");
+    fill(Bytes.slice(OffsetInBits / 8, (NewOffsetInBits - OffsetInBits) / 8),
+         Byte::undef());
+    OffsetInBits = NewOffsetInBits;
+  };
+  if (auto *ArrTy = dyn_cast<ArrayType>(Ty)) {
+    Type *ElemTy = ArrTy->getElementType();
+    uint32_t CurrentOffsetInBits = OffsetInBits;
+    uint32_t StrideInBits = getEffectiveTypeAllocSize(ElemTy) * 8;
+    for (const auto &SubVal : Val.asAggregate()) {
+      FillUndefBytes(CurrentOffsetInBits);
+      toBytes(SubVal, ElemTy, OffsetInBits, Bytes, /*PaddingBits=*/true);
+      CurrentOffsetInBits += StrideInBits;
+    }
+    return;
+  }
+  if (auto *StructTy = dyn_cast<StructType>(Ty)) {
+    auto *Layout = DL.getStructLayout(StructTy);
+    uint32_t BaseOffsetInBits = OffsetInBits;
+    for (uint32_t I = 0, E = Val.asAggregate().size(); I != E; ++I) {
+      Type *ElemTy = StructTy->getElementType(I);
+      TypeSize ElemOffset = Layout->getElementOffset(I);
+      uint32_t NewOffsetInBits =
+          BaseOffsetInBits + getEffectiveTypeSize(ElemOffset) * 8;
+      FillUndefBytes(NewOffsetInBits);
+      toBytes(Val.asAggregate()[I], ElemTy, OffsetInBits, Bytes,
+              /*PaddingBits=*/true);
+    }
+    uint32_t NewOffsetInBits =
+        BaseOffsetInBits + getEffectiveTypeStoreSize(StructTy) * 8;
+    FillUndefBytes(NewOffsetInBits);
+    return;
+  }
+
+  llvm_unreachable("Unsupported first class type.");
+}
+
+AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty) {
+  uint32_t OffsetInBits = 0;
+  return fromBytes(Bytes, Ty, OffsetInBits, /*CheckPaddingBits=*/true);
+}
+
+void Context::toBytes(const AnyValue &Val, Type *Ty,
+                      MutableArrayRef<Byte> Bytes) {
+  uint32_t OffsetInBits = 0;
+  toBytes(Val, Ty, OffsetInBits, Bytes, /*PaddingBits=*/true);
+}
+
+AnyValue Context::load(MemoryObject &MO, uint64_t Offset, Type *ValTy) {
+  return fromBytes(
+      MO.getBytes().slice(Offset, getEffectiveTypeStoreSize(ValTy)), ValTy);
+}
+
+void Context::store(MemoryObject &MO, uint64_t Offset, const AnyValue &Val,
+                    Type *ValTy) {
+  toBytes(Val, ValTy,
+          MO.getBytes().slice(Offset, getEffectiveTypeStoreSize(ValTy)));
+}
+
+void Context::storeRawBytes(MemoryObject &MO, uint64_t Offset, const void *Data,
+                            uint64_t Size) {
+  for (uint64_t I = 0; I != Size; ++I)
+    MO[Offset + I] = Byte::concrete(static_cast<const uint8_t *>(Data)[I]);
+}
+
 MemoryObject::~MemoryObject() = default;
 MemoryObject::MemoryObject(uint64_t Addr, uint64_t Size, StringRef Name,
                            unsigned AS, MemInitKind InitKind)
@@ -111,13 +405,13 @@ MemoryObject::MemoryObject(uint64_t Addr, uint64_t Size, StringRef Name,
                                               : MemoryObjectState::Dead) {
   switch (InitKind) {
   case MemInitKind::Zeroed:
-    Bytes.resize(Size, Byte{0, ByteKind::Concrete});
+    Bytes.resize(Size, Byte::concrete(0));
     break;
   case MemInitKind::Uninitialized:
-    Bytes.resize(Size, Byte{0, ByteKind::Undef});
+    Bytes.resize(Size, Byte::undef());
     break;
   case MemInitKind::Poisoned:
-    Bytes.resize(Size, Byte{0, ByteKind::Poison});
+    Bytes.resize(Size, Byte::poison());
     break;
   }
 }
@@ -175,38 +469,17 @@ BasicBlock *Context::getTargetBlock(const Pointer &Ptr) {
   return It->second.first;
 }
 
-void MemoryObject::markAsFreed() {
-  State = MemoryObjectState::Freed;
-  Bytes.clear();
+uint64_t Context::getEffectiveTypeAllocSize(Type *Ty) {
+  // FIXME: It is incorrect for overaligned scalable vector types.
+  return getEffectiveTypeSize(DL.getTypeAllocSize(Ty));
 }
-
-void MemoryObject::writeRawBytes(uint64_t Offset, const void *Data,
-                                 uint64_t Length) {
-  assert(SaturatingAdd(Offset, Length) <= Size && "Write out of bounds");
-  const uint8_t *ByteData = static_cast<const uint8_t *>(Data);
-  for (uint64_t I = 0; I < Length; ++I)
-    Bytes[Offset + I].set(ByteData[I]);
+uint64_t Context::getEffectiveTypeStoreSize(Type *Ty) {
+  return getEffectiveTypeSize(DL.getTypeStoreSize(Ty));
 }
 
-void MemoryObject::writeInteger(uint64_t Offset, const APInt &Int,
-                                const DataLayout &DL) {
-  uint64_t BitWidth = Int.getBitWidth();
-  uint64_t IntSize = divideCeil(BitWidth, 8);
-  assert(SaturatingAdd(Offset, IntSize) <= Size && "Write out of bounds");
-  for (uint64_t I = 0; I < IntSize; ++I) {
-    uint64_t ByteIndex = DL.isLittleEndian() ? I : (IntSize - 1 - I);
-    uint64_t Bits = std::min(BitWidth - ByteIndex * 8, uint64_t(8));
-    Bytes[Offset + I].set(Int.extractBitsAsZExtValue(Bits, ByteIndex * 8));
-  }
-}
-void MemoryObject::writeFloat(uint64_t Offset, const APFloat &Float,
-                              const DataLayout &DL) {
-  writeInteger(Offset, Float.bitcastToAPInt(), DL);
-}
-void MemoryObject::writePointer(uint64_t Offset, const Pointer &Ptr,
-                                const DataLayout &DL) {
-  writeInteger(Offset, Ptr.address(), DL);
-  // TODO: provenance
+void MemoryObject::markAsFreed() {
+  State = MemoryObjectState::Freed;
+  Bytes.clear();
 }
 
 } // namespace llvm::ubi
diff --git a/llvm/tools/llubi/lib/Context.h b/llvm/tools/llubi/lib/Context.h
index 25ba940323ebf..a250004b3cb54 100644
--- a/llvm/tools/llubi/lib/Context.h
+++ b/llvm/tools/llubi/lib/Context.h
@@ -14,6 +14,7 @@
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/IR/Module.h"
 #include <map>
+#include <random>
 
 namespace llvm::ubi {
 
@@ -41,6 +42,11 @@ enum class MemoryObjectState {
   Freed,
 };
 
+enum class UndefValueBehavior {
+  NonDeterministic, // Each use of the undef value can yield different results.
+  Zero,             // All uses of the undef value yield zero.
+};
+
 class MemoryObject : public RefCountedBase<MemoryObject> {
   uint64_t Address;
   uint64_t Size;
@@ -65,6 +71,7 @@ class MemoryObject : public RefCountedBase<MemoryObject> {
   StringRef getName() const { return Name; }
   unsigned getAddressSpace() const { return AS; }
   MemoryObjectState getState() const { return State; }
+  void setState(MemoryObjectState S) { State = S; }
   bool isConstant() const { return IsConstant; }
   void setIsConstant(bool C) { IsConstant = C; }
 
@@ -76,10 +83,8 @@ class MemoryObject : public RefCountedBase<MemoryObject> {
     assert(Offset < Size && "Offset out of bounds");
     return Bytes[Offset];
   }
-  void writeRawBytes(uint64_t Offset, const void *Data, uint64_t Length);
-  void writeInteger(uint64_t Offset, const APInt &Int, const DataLayout &DL);
-  void writeFloat(uint64_t Offset, const APFloat &Float, const DataLayout &DL);
-  void writePointer(uint64_t Offset, const Pointer &Ptr, const DataLayout &DL);
+  ArrayRef<Byte> getBytes() const { return Bytes; }
+  MutableArrayRef<Byte> getBytes() { return Bytes; }
 
   void markAsFreed();
 };
@@ -126,6 +131,9 @@ class Context {
   uint32_t VScale = 4;
   uint32_t MaxSteps = 0;
   uint32_t MaxStackDepth = 256;
+  UndefValueBehavior UndefBehavior = UndefValueBehavior::NonDeterministic;
+
+  std::mt19937_64 Rng;
 
   // Memory
   uint64_t UsedMem = 0;
@@ -141,6 +149,10 @@ class Context {
   // precisely after we make ptrtoint have the implicit side-effect of exposing
   // the provenance.
   std::map<uint64_t, IntrusiveRefCntPtr<MemoryObject>> MemoryObjects;
+  AnyValue fromBytes(ArrayRef<Byte> Bytes, Type *Ty, uint32_t &OffsetInBits,
+                     bool CheckPaddingBits);
+  void toBytes(const AnyValue &Val, Type *Ty, uint32_t &OffsetInBits,
+               MutableArrayRef<Byte> Bytes, bool PaddingBits);
 
   // Constants
   // Use std::map to avoid iterator/reference invalidation.
@@ -171,15 +183,29 @@ class Context {
   uint32_t getVScale() const { return VScale; }
   uint32_t getMaxSteps() const { return MaxSteps; }
   uint32_t getMaxStackDepth() const { return MaxStackDepth; }
+  void setUndefValueBehavior(UndefValueBehavior UB) { UndefBehavior = UB; }
+  void reseed(uint32_t Seed) { Rng.seed(Seed); }
 
   LLVMContext &getContext() const { return Ctx; }
   const DataLayout &getDataLayout() const { return DL; }
   const TargetLibraryInfoImpl &getTLIImpl() const { return TLIImpl; }
+  /// Get the effective vector length for a vector type.
   uint32_t getEVL(ElementCount EC) const {
     if (EC.isScalable())
       return VScale * EC.getKnownMinValue();
     return EC.getFixedValue();
   }
+  /// For scalable sizes, returns the known minimum size multiplied by VScale.
+  uint64_t getEffectiveTypeSize(TypeSize Size) const {
+    if (Size.isScalable())
+      return VScale * Size.getKnownMinValue();
+    return Size.getFixedValue();
+  }
+  /// Returns DL.getTypeAllocSize/getTypeStoreSize for the given type.
+  /// An exception to this is that for scalable vector types, the size is
+  /// computed as if the vector has getEVL(ElementCount) elements.
+  uint64_t getEffectiveTypeAllocSize(Type *Ty);
+  uint64_t getEffectiveTypeStoreSize(Type *Ty);
 
   const AnyValue &getConstantValue(Constant *C);
   IntrusiveRefCntPtr<MemoryObject> allocate(uint64_t Size, uint64_t Align,
@@ -189,6 +215,18 @@ class Context {
   /// Derive a pointer from a memory object with offset 0.
   /// Please use Pointer's interface for further manipulations.
   Pointer deriveFromMemoryObject(IntrusiveRefCntPtr<MemoryObject> Obj);
+  /// Convert byte sequence to a value of the given type. Uninitialized bits are
+  /// flushed according to the options.
+  AnyValue fromBytes(ArrayRef<Byte> Bytes, Type *Ty);
+  /// Convert a value to bytes. Padding bytes in aggregates become undef.
+  void toBytes(const AnyValue &Val, Type *Ty, MutableArrayRef<Byte> Bytes);
+  /// Direct memory load without checks.
+  AnyValue load(MemoryObject &MO, uint64_t Offset, Type *ValTy);
+  /// Direct memory store without checks.
+  void store(MemoryObject &MO, uint64_t Offset, const AnyValue &Val,
+             Type *ValTy);
+  void storeRawBytes(MemoryObject &MO, uint64_t Offset, const void *Data,
+                     uint64_t Size);
 
   Function *getTargetFunction(const Pointer &Ptr);
   BasicBlock *getTargetBlock(const Pointer &Ptr);
diff --git a/llvm/tools/llubi/lib/Interpreter.cpp b/llvm/tools/llubi/lib/Interpreter.cpp
index 7961f7551cf48..dd5530a355538 100644
--- a/llvm/tools/llubi/lib/Interpreter.cpp
+++ b/llvm/tools/llubi/lib/Interpreter.cpp
@@ -16,10 +16,13 @@
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/Allocator.h"
 
 namespace llvm::ubi {
 
+using namespace PatternMatch;
+
 enum class FrameState {
   // It is about to enter the function.
   // Valid transition:
@@ -248,6 +251,88 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
     return false;
   }
 
+  /// Check if the upcoming memory access is valid. Returns the offset relative
+  /// to the underlying object if it is valid.
+  std::optional<uint64_t> verifyMemAccess(const MemoryObject &MO,
+                                          const APInt &Address,
+                                          uint64_t AccessSize, Align Alignment,
+                                          bool IsStore) {
+    // Loading from a stack object outside its lifetime is not undefined
+    // behavior and returns a poison value instead. Storing to it is still
+    // undefined behavior.
+    if (IsStore ? MO.getState() != MemoryObjectState::Alive
+                : MO.getState() == MemoryObjectState::Freed) {
+      reportImmediateUB("Try to access a dead memory object.");
+      return std::nullopt;
+    }
+
+    if (Address.countr_zero() < Log2(Alignment)) {
+      reportImmediateUB("Misaligned memory access.");
+      return std::nullopt;
+    }
+
+    if (AccessSize > MO.getSize() || Address.ult(MO.getAddress())) {
+      reportImmediateUB("Memory access is out of bounds.");
+      return std::nullopt;
+    }
+
+    APInt Offset = Address - MO.getAddress();
+
+    if (Offset.ugt(MO.getSize() - AccessSize)) {
+      reportImmediateUB("Memory access is out of bounds.");
+      return std::nullopt;
+    }
+
+    return Offset.getZExtValue();
+  }
+
+  AnyValue load(const AnyValue &Ptr, Align Alignment, Type *ValTy) {
+    if (Ptr.isPoison()) {
+      reportImmediateUB("Invalid memory access with a poison pointer.");
+      return AnyValue::getPoisonValue(Ctx, ValTy);
+    }
+    auto &PtrVal = Ptr.asPointer();
+    auto *MO = PtrVal.getMemoryObject();
+    if (!MO) {
+      reportImmediateUB(
+          "Invalid memory access via a pointer with nullary provenance.");
+      return AnyValue::getPoisonValue(Ctx, ValTy);
+    }
+    // TODO: pointer capability check
+    if (auto Offset =
+            verifyMemAccess(*MO, PtrVal.address(),
+                            Ctx.getEffectiveTypeStoreSize(ValTy), Alignment,
+                            /*IsStore=*/false)) {
+      // Load from a dead stack object yields poison value.
+      if (MO->getState() == MemoryObjectState::Dead)
+        return AnyValue::getPoisonValue(Ctx, ValTy);
+
+      return Ctx.load(*MO, *Offset, ValTy);
+    }
+    return AnyValue::getPoisonValue(Ctx, ValTy);
+  }
+
+  void store(const AnyValue &Ptr, Align Alignment, const AnyValue &Val,
+             Type *ValTy) {
+    if (Ptr.isPoison()) {
+      reportImmediateUB("Invalid memory access with a poison pointer.");
+      return;
+    }
+    auto &PtrVal = Ptr.asPointer();
+    auto *MO = PtrVal.getMemoryObject();
+    if (!MO) {
+      reportImmediateUB(
+          "Invalid memory access via a pointer with nullary provenance.");
+      return;
+    }
+    // TODO: pointer capability check
+    if (auto Offset =
+            verifyMemAccess(*MO, PtrVal.address(),
+                            Ctx.getEffectiveTypeStoreSize(ValTy), Alignment,
+                            /*IsStore=*/true))
+      Ctx.store(*MO, *Offset, Val, ValTy);
+  }
+
   AnyValue computePtrAdd(const Pointer &Ptr, const APInt &Offset,
                          GEPNoWrapFlags Flags, AnyValue &AccumulatedOffset) {
     if (Offset.isZero())
@@ -438,6 +523,22 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
       }
       // TODO: handle llvm.assume with operand bundles
       return AnyValue();
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end: {
+      auto *Ptr = CB.getArgOperand(0);
+      if (isa<PoisonValue>(Ptr))
+        return AnyValue();
+      auto *MO = getValue(Ptr).asPointer().getMemoryObject();
+      assert(MO && "Memory object accessed by lifetime intrinsic should be "
+                   "always valid.");
+      if (IID == Intrinsic::lifetime_start) {
+        MO->setState(MemoryObjectState::Alive);
+        fill(MO->getBytes(), Byte::undef());
+      } else {
+        MO->setState(MemoryObjectState::Dead);
+      }
+      return AnyValue();
+    }
     default:
       Handler.onUnrecognizedInstruction(CB);
       Status = false;
@@ -799,8 +900,7 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
   }
 
   void visitAllocaInst(AllocaInst &AI) {
-    uint64_t AllocSize =
-        DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue();
+    uint64_t AllocSize = Ctx.getEffectiveTypeAllocSize(AI.getAllocatedType());
     if (AI.isArrayAllocation()) {
       auto &Size = getValue(AI.getArraySize());
       if (Size.isPoison()) {
@@ -821,10 +921,14 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
         return;
       }
     }
-    // FIXME: If it is used by llvm.lifetime.start, it should be initially dead.
+    // If it is used by llvm.lifetime.start, it should be initially dead.
+    bool IsInitiallyDead = any_of(AI.users(), [](User *U) {
+      return match(U, m_Intrinsic<Intrinsic::lifetime_start>());
+    });
     auto Obj = Ctx.allocate(AllocSize, AI.getPointerAlignment(DL).value(),
                             AI.getName(), AI.getAddressSpace(),
-                            MemInitKind::Uninitialized);
+                            IsInitiallyDead ? MemInitKind::Poisoned
+                                            : MemInitKind::Uninitialized);
     if (!Obj) {
       reportError("Insufficient stack space.");
       return;
@@ -897,10 +1001,7 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
       // TODO: Should be documented in LangRef: GEPs with nowrap flags should
       // return poison when the type size exceeds index space.
       TypeSize Offset = GTI.getSequentialElementStride(DL);
-      APInt Scale(IndexBitWidth,
-                  Offset.isScalable()
-                      ? Offset.getKnownMinValue() * Ctx.getVScale()
-                      : Offset.getFixedValue(),
+      APInt Scale(IndexBitWidth, Ctx.getEffectiveTypeSize(Offset),
                   /*isSigned=*/false, /*implicitTrunc=*/true);
       if (!Scale.isZero())
         ApplyScaledOffset(getValue(V), Scale);
@@ -920,6 +1021,24 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
     });
   }
 
+  void visitLoadInst(LoadInst &LI) {
+    auto RetVal =
+        load(getValue(LI.getPointerOperand()), LI.getAlign(), LI.getType());
+    // TODO: track volatile loads
+    // TODO: handle metadata
+    setResult(LI, std::move(RetVal));
+  }
+
+  void visitStoreInst(StoreInst &SI) {
+    auto &Ptr = getValue(SI.getPointerOperand());
+    auto &Val = getValue(SI.getValueOperand());
+    // TODO: track volatile stores
+    // TODO: handle metadata
+    store(Ptr, SI.getAlign(), Val, SI.getValueOperand()->getType());
+    if (Status)
+      Status &= Handler.onInstructionExecuted(SI, AnyValue());
+  }
+
   void visitInstruction(Instruction &I) {
     Handler.onUnrecognizedInstruction(I);
     Status = false;
diff --git a/llvm/tools/llubi/lib/Value.h b/llvm/tools/llubi/lib/Value.h
index 6add6e7f06304..b4686160ea8b8 100644
--- a/llvm/tools/llubi/lib/Value.h
+++ b/llvm/tools/llubi/lib/Value.h
@@ -21,26 +21,55 @@ class MemoryObject;
 class Context;
 class AnyValue;
 
-enum class ByteKind : uint8_t {
-  // A concrete byte with a known value.
-  Concrete,
-  // A uninitialized byte. Each load from an uninitialized byte yields
-  // a nondeterministic value.
-  Undef,
-  // A poisoned byte. It occurs when the program stores a poison value to
-  // memory,
-  // or when a memory object is dead.
-  Poison,
-};
-
+/// Representation of a byte in memory.
+/// How to interpret the byte per bit:
+/// - If the concrete mask bit is 0, the bit is either undef or poison. The
+/// value bit indicates whether it is undef.
+/// - If the concrete mask bit is 1, the bit is a concrete value. The value bit
+/// stores the concrete bit value.
 struct Byte {
+  uint8_t ConcreteMask;
   uint8_t Value;
-  ByteKind Kind : 2;
-  // TODO: provenance
+  // TODO: captured capabilities of pointers.
+
+  static Byte poison() { return Byte{0, 0}; }
+  static Byte undef() { return Byte{0, 255}; }
+  static Byte concrete(uint8_t Val) { return Byte{255, Val}; }
+
+  void zeroBits(uint8_t Mask) {
+    ConcreteMask |= Mask;
+    Value &= ~Mask;
+  }
+
+  void poisonBits(uint8_t Mask) {
+    ConcreteMask &= ~Mask;
+    Value &= ~Mask;
+  }
+
+  void undefBits(uint8_t Mask) {
+    ConcreteMask &= ~Mask;
+    Value |= Mask;
+  }
+
+  void writeBits(uint8_t Mask, uint8_t Val) {
+    ConcreteMask |= Mask;
+    Value = (Value & ~Mask) | (Val & Mask);
+  }
+
+  /// Returns a logical byte that is part of two adjacent bytes.
+  /// Example with ShAmt = 5:
+  ///     |       Low       |      High       |
+  /// LSB | 0 1 0 1 0 1 0 1 | 0 0 0 0 1 1 1 1 | MSB
+  ///     Result =  | 1 0 1   0 0 0 0 1 |
+  static Byte fshr(const Byte &Low, const Byte &High, uint32_t ShAmt) {
+    return Byte{static_cast<uint8_t>(
+                    (Low.ConcreteMask | (High.ConcreteMask << 8)) >> ShAmt),
+                static_cast<uint8_t>((Low.Value | (High.Value << 8)) >> ShAmt)};
+  }
 
-  void set(uint8_t V) {
-    Value = V;
-    Kind = ByteKind::Concrete;
+  Byte lshr(uint8_t Shift) const {
+    return Byte{static_cast<uint8_t>(ConcreteMask >> Shift),
+                static_cast<uint8_t>(Value >> Shift)};
   }
 };
 
diff --git a/llvm/tools/llubi/llubi.cpp b/llvm/tools/llubi/llubi.cpp
index 1d2d4dc050b5d..de76a7e64c27b 100644
--- a/llvm/tools/llubi/llubi.cpp
+++ b/llvm/tools/llubi/llubi.cpp
@@ -74,6 +74,19 @@ static cl::opt<unsigned>
     VScale("vscale", cl::desc("The value of llvm.vscale (default = 4)"),
            cl::value_desc("N"), cl::init(4), cl::cat(InterpreterCategory));
 
+static cl::opt<unsigned>
+    Seed("seed",
+         cl::desc("Random seed for non-deterministic behavior (default = 0)"),
+         cl::value_desc("N"), cl::init(0), cl::cat(InterpreterCategory));
+
+static cl::opt<ubi::UndefValueBehavior> UndefBehavior(
+    "undef-value-behavior", cl::desc("Choose undef value behavior:"),
+    cl::values(clEnumValN(ubi::UndefValueBehavior::NonDeterministic, "nondet",
+                          "Each load of an uninitialized byte yields a "
+                          "freshly random value."),
+               clEnumValN(ubi::UndefValueBehavior::Zero, "zero",
+                          "All uses of an uninitialized byte yield zero.")));
+
 class VerboseEventHandler : public ubi::EventHandler {
 public:
   bool onInstructionExecuted(Instruction &I,
@@ -164,6 +177,8 @@ int main(int argc, char **argv) {
   Ctx.setVScale(VScale);
   Ctx.setMaxSteps(MaxSteps);
   Ctx.setMaxStackDepth(MaxStackDepth);
+  Ctx.setUndefValueBehavior(UndefBehavior);
+  Ctx.reseed(Seed);
 
   if (!Ctx.initGlobalValues()) {
     WithColor::error() << "Failed to initialize global values (e.g., the "
@@ -182,8 +197,8 @@ int main(int argc, char **argv) {
   }
   TargetLibraryInfo TLI(Ctx.getTLIImpl());
   Type *IntTy = IntegerType::get(Ctx.getContext(), TLI.getIntSize());
-  auto *MainFuncTy = FunctionType::get(
-      IntTy, {IntTy, PointerType::getUnqual(Ctx.getContext())}, false);
+  Type *PtrTy = PointerType::getUnqual(Ctx.getContext());
+  auto *MainFuncTy = FunctionType::get(IntTy, {IntTy, PtrTy}, false);
   SmallVector<ubi::AnyValue> Args;
   if (EntryFn->getFunctionType() == MainFuncTy) {
     Args.push_back(
@@ -206,8 +221,8 @@ int main(int argc, char **argv) {
         return 1;
       }
       ubi::Pointer ArgPtr = Ctx.deriveFromMemoryObject(ArgvStrMem);
-      ArgvStrMem->writeRawBytes(0, Arg.c_str(), Arg.length());
-      ArgvPtrsMem->writePointer(Idx * PtrSize, ArgPtr, Ctx.getDataLayout());
+      Ctx.storeRawBytes(*ArgvStrMem, 0, Arg.c_str(), Arg.length());
+      Ctx.store(*ArgvPtrsMem, Idx * PtrSize, ArgPtr, PtrTy);
     }
     Args.push_back(Ctx.deriveFromMemoryObject(ArgvPtrsMem));
   } else if (!EntryFn->arg_empty()) {

>From ac5081a72ed52cef08d491f393314d0fc89fdd81 Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Sat, 7 Mar 2026 09:34:28 -0800
Subject: [PATCH 11/33] [SLP][NFC]Add a test with the missing matching of same
 nodes with reuses, NFC

---
 .../SLPVectorizer/RISCV/same-node-reused.ll   | 98 +++++++++++++++++++
 1 file changed, 98 insertions(+)
 create mode 100644 llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll

diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll
new file mode 100644
index 0000000000000..fb76cfcdcef5f
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=slp-vectorizer -S -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=-1 -riscv-v-slp-max-vf=0 < %s | FileCheck %s
+
+define void @test(ptr %dest, ptr %p) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[DEST:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[INC0:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 1
+; CHECK-NEXT:    [[INC1:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 2
+; CHECK-NEXT:    [[INC2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 3
+; CHECK-NEXT:    [[E1:%.*]] = load i16, ptr [[INC0]], align 2
+; CHECK-NEXT:    [[E3:%.*]] = load i16, ptr [[INC2]], align 2
+; CHECK-NEXT:    [[INC5:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 2
+; CHECK-NEXT:    [[INC6:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 3
+; CHECK-NEXT:    [[TMP0:%.*]] = call <3 x i16> @llvm.masked.load.v3i16.p0(ptr align 4 [[P]], <3 x i1> <i1 true, i1 false, i1 true>, <3 x i16> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT:    [[E2:%.*]] = load i16, ptr [[INC1]], align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i16> [[TMP1]], [[TMP1]]
+; CHECK-NEXT:    [[A2:%.*]] = add i16 [[E2]], [[E2]]
+; CHECK-NEXT:    [[A3:%.*]] = add i16 [[E2]], [[E2]]
+; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST]], align 4
+; CHECK-NEXT:    store i16 [[A2]], ptr [[INC5]], align 2
+; CHECK-NEXT:    store i16 [[A3]], ptr [[INC6]], align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %inc0 = getelementptr inbounds i16, ptr %p, i64 1
+  %inc1 = getelementptr inbounds i16, ptr %p, i64 2
+  %inc2 = getelementptr inbounds i16, ptr %p, i64 3
+  %e0 = load i16, ptr %p, align 4
+  %e1 = load i16, ptr %inc0, align 2
+  %e2 = load i16, ptr %inc1, align 2
+  %e3 = load i16, ptr %inc2, align 2
+
+  %a0 = add i16 %e0, %e0
+  %a1 = add i16 %e2, %e2
+  %a2 = add i16 %e2, %e2
+  %a3 = add i16 %e2, %e2
+
+  %inc4 = getelementptr inbounds i16, ptr %dest, i64 1
+  %inc5 = getelementptr inbounds i16, ptr %dest, i64 2
+  %inc6 = getelementptr inbounds i16, ptr %dest, i64 3
+
+  store i16 %a0, ptr %dest, align 4
+  store i16 %a1, ptr %inc4, align 2
+  store i16 %a2, ptr %inc5, align 2
+  store i16 %a3, ptr %inc6, align 2
+  ret void
+}
+
+define void @test1(ptr %dest, ptr %p) {
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ptr [[DEST:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[INC0:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 1
+; CHECK-NEXT:    [[INC1:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 2
+; CHECK-NEXT:    [[INC2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 3
+; CHECK-NEXT:    [[E0:%.*]] = load i16, ptr [[P]], align 4
+; CHECK-NEXT:    [[E1:%.*]] = load i16, ptr [[INC0]], align 2
+; CHECK-NEXT:    [[E2:%.*]] = load i16, ptr [[INC1]], align 2
+; CHECK-NEXT:    [[E3:%.*]] = load i16, ptr [[INC2]], align 2
+; CHECK-NEXT:    [[A0:%.*]] = add i16 [[E0]], [[E0]]
+; CHECK-NEXT:    [[A1:%.*]] = shl i16 [[E2]], 1
+; CHECK-NEXT:    [[A2:%.*]] = shl i16 [[E2]], 1
+; CHECK-NEXT:    [[A3:%.*]] = shl i16 [[E2]], 1
+; CHECK-NEXT:    [[INC4:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 1
+; CHECK-NEXT:    [[INC5:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 2
+; CHECK-NEXT:    [[INC6:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 3
+; CHECK-NEXT:    store i16 [[A0]], ptr [[DEST]], align 4
+; CHECK-NEXT:    store i16 [[A1]], ptr [[INC4]], align 2
+; CHECK-NEXT:    store i16 [[A2]], ptr [[INC5]], align 2
+; CHECK-NEXT:    store i16 [[A3]], ptr [[INC6]], align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %inc0 = getelementptr inbounds i16, ptr %p, i64 1
+  %inc1 = getelementptr inbounds i16, ptr %p, i64 2
+  %inc2 = getelementptr inbounds i16, ptr %p, i64 3
+  %e0 = load i16, ptr %p, align 4
+  %e1 = load i16, ptr %inc0, align 2
+  %e2 = load i16, ptr %inc1, align 2
+  %e3 = load i16, ptr %inc2, align 2
+
+  %a0 = add i16 %e0, %e0
+  %a1 = shl i16 %e2, 1
+  %a2 = shl i16 %e2, 1
+  %a3 = shl i16 %e2, 1
+
+  %inc4 = getelementptr inbounds i16, ptr %dest, i64 1
+  %inc5 = getelementptr inbounds i16, ptr %dest, i64 2
+  %inc6 = getelementptr inbounds i16, ptr %dest, i64 3
+
+  store i16 %a0, ptr %dest, align 4
+  store i16 %a1, ptr %inc4, align 2
+  store i16 %a2, ptr %inc5, align 2
+  store i16 %a3, ptr %inc6, align 2
+  ret void
+}

>From 810af8a3236acb953ca8e3f46849830cfc08d442 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Sat, 7 Mar 2026 18:58:23 +0100
Subject: [PATCH 12/33] clang/AMDGPU: Add missing field to implicit kernarg
 struct definition (#185209)

I missed this one when figuring out all the field offsets.
---
 clang/lib/Headers/amdhsa_abi.h   |   5 +-
 clang/test/Headers/amdhsa_abi.cl | 146 ++++++++++++++++---------------
 2 files changed, 80 insertions(+), 71 deletions(-)

diff --git a/clang/lib/Headers/amdhsa_abi.h b/clang/lib/Headers/amdhsa_abi.h
index 7dcf5b7f8624a..f76fe55a6cc21 100644
--- a/clang/lib/Headers/amdhsa_abi.h
+++ b/clang/lib/Headers/amdhsa_abi.h
@@ -19,7 +19,8 @@ typedef struct __attribute__((aligned(8))) amdhsa_implicit_kernarg_v5 {
   char reserved0[16];
   uint64_t global_offset[3];
   uint16_t grid_dims;
-  char reserved1[14];
+  char reserved1[6];
+  __attribute__((opencl_global)) void *printf_buffer;
   __attribute__((opencl_global)) void *hostcall_buffer;
   __attribute__((opencl_global)) void *multigrid_sync_arg;
   __attribute__((opencl_global)) void *heap_v1;
@@ -60,6 +61,8 @@ _Static_assert(offsetof(amdhsa_implicit_kernarg_v5, global_offset[2]) == 56,
                "wrong offset");
 _Static_assert(offsetof(amdhsa_implicit_kernarg_v5, grid_dims) == 64,
                "wrong offset");
+_Static_assert(offsetof(amdhsa_implicit_kernarg_v5, printf_buffer) == 72,
+               "wrong offset");
 _Static_assert(offsetof(amdhsa_implicit_kernarg_v5, hostcall_buffer) == 80,
                "wrong offset");
 _Static_assert(offsetof(amdhsa_implicit_kernarg_v5, multigrid_sync_arg) == 88,
diff --git a/clang/test/Headers/amdhsa_abi.cl b/clang/test/Headers/amdhsa_abi.cl
index 170ae925c8c41..258a08d2f1843 100644
--- a/clang/test/Headers/amdhsa_abi.cl
+++ b/clang/test/Headers/amdhsa_abi.cl
@@ -17,99 +17,103 @@ void use_ptr(__global void* p);
 // CHECK-NEXT:    [[IMPLICIT_ARGS:%.*]] = alloca ptr addrspace(4), align 8, addrspace(5)
 // CHECK-NEXT:    call void @llvm.lifetime.start.p5(ptr addrspace(5) [[IMPLICIT_ARGS]]) #[[ATTR4:[0-9]+]]
 // CHECK-NEXT:    [[TMP0:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-// CHECK-NEXT:    store ptr addrspace(4) [[TMP0]], ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8:![0-9]+]]
-// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    store ptr addrspace(4) [[TMP0]], ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7:![0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[BLOCK_COUNT:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5:%.*]], ptr addrspace(4) [[TMP1]], i32 0, i32 0
 // CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x i32], ptr addrspace(4) [[BLOCK_COUNT]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX]], align 8, !tbaa [[INT_TBAA4:![0-9]+]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX]], align 8, !tbaa [[INT_TBAA3:![0-9]+]]
 // CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP2]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[BLOCK_COUNT1:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP3]], i32 0, i32 0
 // CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x i32], ptr addrspace(4) [[BLOCK_COUNT1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA4]]
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA3]]
 // CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP4]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[BLOCK_COUNT3:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP5]], i32 0, i32 0
 // CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x i32], ptr addrspace(4) [[BLOCK_COUNT3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX4]], align 8, !tbaa [[INT_TBAA4]]
+// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[ARRAYIDX4]], align 8, !tbaa [[INT_TBAA3]]
 // CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP6]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GROUP_SIZE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP7]], i32 0, i32 1
 // CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[GROUP_SIZE]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX5]], align 4, !tbaa [[SHORT_TBAA11:![0-9]+]]
+// CHECK-NEXT:    [[TMP8:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX5]], align 4, !tbaa [[SHORT_TBAA10:![0-9]+]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP8]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP9:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP9:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GROUP_SIZE6:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP9]], i32 0, i32 1
 // CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[GROUP_SIZE6]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP10:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX7]], align 2, !tbaa [[SHORT_TBAA11]]
+// CHECK-NEXT:    [[TMP10:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX7]], align 2, !tbaa [[SHORT_TBAA10]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP10]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP11:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP11:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GROUP_SIZE8:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP11]], i32 0, i32 1
 // CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[GROUP_SIZE8]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX9]], align 4, !tbaa [[SHORT_TBAA11]]
+// CHECK-NEXT:    [[TMP12:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX9]], align 4, !tbaa [[SHORT_TBAA10]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP12]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP13:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP13:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[REMAINDER:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP13]], i32 0, i32 2
 // CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[REMAINDER]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP14:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX10]], align 2, !tbaa [[SHORT_TBAA11]]
+// CHECK-NEXT:    [[TMP14:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX10]], align 2, !tbaa [[SHORT_TBAA10]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP14]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[REMAINDER11:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP15]], i32 0, i32 2
 // CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[REMAINDER11]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP16:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX12]], align 2, !tbaa [[SHORT_TBAA11]]
+// CHECK-NEXT:    [[TMP16:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX12]], align 2, !tbaa [[SHORT_TBAA10]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP16]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[REMAINDER13:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP17]], i32 0, i32 2
 // CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [3 x i16], ptr addrspace(4) [[REMAINDER13]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP18:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX14]], align 2, !tbaa [[SHORT_TBAA11]]
+// CHECK-NEXT:    [[TMP18:%.*]] = load i16, ptr addrspace(4) [[ARRAYIDX14]], align 2, !tbaa [[SHORT_TBAA10]]
 // CHECK-NEXT:    call void @use_u16(i16 noundef zeroext [[TMP18]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GLOBAL_OFFSET:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP19]], i32 0, i32 4
 // CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [3 x i64], ptr addrspace(4) [[GLOBAL_OFFSET]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP20:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX15]], align 8, !tbaa [[LONG_TBAA13:![0-9]+]]
+// CHECK-NEXT:    [[TMP20:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX15]], align 8, !tbaa [[LONG_TBAA12:![0-9]+]]
 // CHECK-NEXT:    call void @use_u64(i64 noundef [[TMP20]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GLOBAL_OFFSET16:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP21]], i32 0, i32 4
 // CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds [3 x i64], ptr addrspace(4) [[GLOBAL_OFFSET16]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP22:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX17]], align 8, !tbaa [[LONG_TBAA13]]
+// CHECK-NEXT:    [[TMP22:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX17]], align 8, !tbaa [[LONG_TBAA12]]
 // CHECK-NEXT:    call void @use_u64(i64 noundef [[TMP22]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP23:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
+// CHECK-NEXT:    [[TMP23:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
 // CHECK-NEXT:    [[GLOBAL_OFFSET18:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP23]], i32 0, i32 4
 // CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds [3 x i64], ptr addrspace(4) [[GLOBAL_OFFSET18]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP24:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX19]], align 8, !tbaa [[LONG_TBAA13]]
+// CHECK-NEXT:    [[TMP24:%.*]] = load i64, ptr addrspace(4) [[ARRAYIDX19]], align 8, !tbaa [[LONG_TBAA12]]
 // CHECK-NEXT:    call void @use_u64(i64 noundef [[TMP24]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP25:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[HOSTCALL_BUFFER:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP25]], i32 0, i32 7
-// CHECK-NEXT:    [[TMP26:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[HOSTCALL_BUFFER]], align 8, !tbaa [[ANYPTR_TBAA15:![0-9]+]]
+// CHECK-NEXT:    [[TMP25:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[PRINTF_BUFFER:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP25]], i32 0, i32 7
+// CHECK-NEXT:    [[TMP26:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[PRINTF_BUFFER]], align 8, !tbaa [[ANYPTR_TBAA14:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP26]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP27:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[MULTIGRID_SYNC_ARG:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP27]], i32 0, i32 8
-// CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[MULTIGRID_SYNC_ARG]], align 8, !tbaa [[ANYPTR_TBAA17:![0-9]+]]
+// CHECK-NEXT:    [[TMP27:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[HOSTCALL_BUFFER:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP27]], i32 0, i32 8
+// CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[HOSTCALL_BUFFER]], align 8, !tbaa [[ANYPTR_TBAA16:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP28]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[HEAP_V1:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP29]], i32 0, i32 9
-// CHECK-NEXT:    [[TMP30:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[HEAP_V1]], align 8, !tbaa [[ANYPTR_TBAA18:![0-9]+]]
+// CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[MULTIGRID_SYNC_ARG:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP29]], i32 0, i32 9
+// CHECK-NEXT:    [[TMP30:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[MULTIGRID_SYNC_ARG]], align 8, !tbaa [[ANYPTR_TBAA17:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP30]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[DEFAULT_QUEUE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP31]], i32 0, i32 10
-// CHECK-NEXT:    [[TMP32:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[DEFAULT_QUEUE]], align 8, !tbaa [[ANYPTR_TBAA19:![0-9]+]]
+// CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[HEAP_V1:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP31]], i32 0, i32 10
+// CHECK-NEXT:    [[TMP32:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[HEAP_V1]], align 8, !tbaa [[ANYPTR_TBAA18:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP32]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP33:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[COMPLETION_ACTION:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP33]], i32 0, i32 11
-// CHECK-NEXT:    [[TMP34:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[COMPLETION_ACTION]], align 8, !tbaa [[ANYPTR_TBAA20:![0-9]+]]
+// CHECK-NEXT:    [[TMP33:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[DEFAULT_QUEUE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP33]], i32 0, i32 11
+// CHECK-NEXT:    [[TMP34:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[DEFAULT_QUEUE]], align 8, !tbaa [[ANYPTR_TBAA19:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP34]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[QUEUE_PTR:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP35]], i32 0, i32 15
-// CHECK-NEXT:    [[TMP36:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[QUEUE_PTR]], align 8, !tbaa [[ANYPTR_TBAA21:![0-9]+]]
+// CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[COMPLETION_ACTION:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP35]], i32 0, i32 12
+// CHECK-NEXT:    [[TMP36:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[COMPLETION_ACTION]], align 8, !tbaa [[ANYPTR_TBAA20:![0-9]+]]
 // CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP36]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP37:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[PRIVATE_BASE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP37]], i32 0, i32 13
-// CHECK-NEXT:    [[TMP38:%.*]] = load i32, ptr addrspace(4) [[PRIVATE_BASE]], align 8, !tbaa [[INT_TBAA22:![0-9]+]]
-// CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP38]]) #[[ATTR5]]
-// CHECK-NEXT:    [[TMP39:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]]
-// CHECK-NEXT:    [[SHARED_BASE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP39]], i32 0, i32 14
-// CHECK-NEXT:    [[TMP40:%.*]] = load i32, ptr addrspace(4) [[SHARED_BASE]], align 4, !tbaa [[INT_TBAA23:![0-9]+]]
+// CHECK-NEXT:    [[TMP37:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[QUEUE_PTR:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP37]], i32 0, i32 16
+// CHECK-NEXT:    [[TMP38:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[QUEUE_PTR]], align 8, !tbaa [[ANYPTR_TBAA21:![0-9]+]]
+// CHECK-NEXT:    call void @use_ptr(ptr addrspace(1) noundef [[TMP38]]) #[[ATTR5]]
+// CHECK-NEXT:    [[TMP39:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[PRIVATE_BASE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP39]], i32 0, i32 14
+// CHECK-NEXT:    [[TMP40:%.*]] = load i32, ptr addrspace(4) [[PRIVATE_BASE]], align 8, !tbaa [[INT_TBAA22:![0-9]+]]
 // CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP40]]) #[[ATTR5]]
+// CHECK-NEXT:    [[TMP41:%.*]] = load ptr addrspace(4), ptr addrspace(5) [[IMPLICIT_ARGS]], align 8, !tbaa [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]]
+// CHECK-NEXT:    [[SHARED_BASE:%.*]] = getelementptr inbounds nuw [[STRUCT_AMDHSA_IMPLICIT_KERNARG_V5]], ptr addrspace(4) [[TMP41]], i32 0, i32 15
+// CHECK-NEXT:    [[TMP42:%.*]] = load i32, ptr addrspace(4) [[SHARED_BASE]], align 4, !tbaa [[INT_TBAA23:![0-9]+]]
+// CHECK-NEXT:    call void @use_u32(i32 noundef [[TMP42]]) #[[ATTR5]]
 // CHECK-NEXT:    call void @llvm.lifetime.end.p5(ptr addrspace(5) [[IMPLICIT_ARGS]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
 //
@@ -133,6 +137,7 @@ void test_implicit_args_v5() {
   use_u64(implicit_args->global_offset[1]);
   use_u64(implicit_args->global_offset[2]);
 
+  use_ptr(implicit_args->printf_buffer);
   use_ptr(implicit_args->hostcall_buffer);
   use_ptr(implicit_args->multigrid_sync_arg);
   use_ptr(implicit_args->heap_v1);
@@ -143,24 +148,25 @@ void test_implicit_args_v5() {
   use_u32(implicit_args->shared_base);
 }
 //.
-// CHECK: [[INT_TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// CHECK: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// CHECK: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// CHECK: [[META7]] = !{!"Simple C/C++ TBAA"}
-// CHECK: [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA8]] = !{[[META9:![0-9]+]], [[META9]], i64 0}
-// CHECK: [[META9]] = !{!"p1 _ZTS26amdhsa_implicit_kernarg_v5", [[META10:![0-9]+]], i64 0}
-// CHECK: [[META10]] = !{!"any pointer", [[META6]], i64 0}
-// CHECK: [[SHORT_TBAA11]] = !{[[META12:![0-9]+]], [[META12]], i64 0}
-// CHECK: [[META12]] = !{!"short", [[META6]], i64 0}
-// CHECK: [[LONG_TBAA13]] = !{[[META14:![0-9]+]], [[META14]], i64 0}
-// CHECK: [[META14]] = !{!"long", [[META6]], i64 0}
-// CHECK: [[ANYPTR_TBAA15]] = !{[[META16:![0-9]+]], [[META10]], i64 80}
-// CHECK: [[META16]] = !{!"amdhsa_implicit_kernarg_v5", [[META6]], i64 0, [[META6]], i64 12, [[META6]], i64 18, [[META6]], i64 24, [[META6]], i64 40, [[META12]], i64 64, [[META6]], i64 66, [[META10]], i64 80, [[META10]], i64 88, [[META10]], i64 96, [[META10]], i64 104, [[META10]], i64 112, [[META6]], i64 120, [[META5]], i64 192, [[META5]], i64 196, [[META10]], i64 200, [[META6]], i64 208}
-// CHECK: [[ANYPTR_TBAA17]] = !{[[META16]], [[META10]], i64 88}
-// CHECK: [[ANYPTR_TBAA18]] = !{[[META16]], [[META10]], i64 96}
-// CHECK: [[ANYPTR_TBAA19]] = !{[[META16]], [[META10]], i64 104}
-// CHECK: [[ANYPTR_TBAA20]] = !{[[META16]], [[META10]], i64 112}
-// CHECK: [[ANYPTR_TBAA21]] = !{[[META16]], [[META10]], i64 200}
-// CHECK: [[INT_TBAA22]] = !{[[META16]], [[META5]], i64 192}
-// CHECK: [[INT_TBAA23]] = !{[[META16]], [[META5]], i64 196}
+// CHECK: [[INT_TBAA3]] = !{[[META4:![0-9]+]], [[META4]], i64 0}
+// CHECK: [[META4]] = !{!"int", [[META5:![0-9]+]], i64 0}
+// CHECK: [[META5]] = !{!"omnipotent char", [[META6:![0-9]+]], i64 0}
+// CHECK: [[META6]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[_ZTS26AMDHSA_IMPLICIT_KERNARG_V5PTR_TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK: [[META8]] = !{!"p1 _ZTS26amdhsa_implicit_kernarg_v5", [[META9:![0-9]+]], i64 0}
+// CHECK: [[META9]] = !{!"any pointer", [[META5]], i64 0}
+// CHECK: [[SHORT_TBAA10]] = !{[[META11:![0-9]+]], [[META11]], i64 0}
+// CHECK: [[META11]] = !{!"short", [[META5]], i64 0}
+// CHECK: [[LONG_TBAA12]] = !{[[META13:![0-9]+]], [[META13]], i64 0}
+// CHECK: [[META13]] = !{!"long", [[META5]], i64 0}
+// CHECK: [[ANYPTR_TBAA14]] = !{[[META15:![0-9]+]], [[META9]], i64 72}
+// CHECK: [[META15]] = !{!"amdhsa_implicit_kernarg_v5", [[META5]], i64 0, [[META5]], i64 12, [[META5]], i64 18, [[META5]], i64 24, [[META5]], i64 40, [[META11]], i64 64, [[META5]], i64 66, [[META9]], i64 72, [[META9]], i64 80, [[META9]], i64 88, [[META9]], i64 96, [[META9]], i64 104, [[META9]], i64 112, [[META5]], i64 120, [[META4]], i64 192, [[META4]], i64 196, [[META9]], i64 200, [[META5]], i64 208}
+// CHECK: [[ANYPTR_TBAA16]] = !{[[META15]], [[META9]], i64 80}
+// CHECK: [[ANYPTR_TBAA17]] = !{[[META15]], [[META9]], i64 88}
+// CHECK: [[ANYPTR_TBAA18]] = !{[[META15]], [[META9]], i64 96}
+// CHECK: [[ANYPTR_TBAA19]] = !{[[META15]], [[META9]], i64 104}
+// CHECK: [[ANYPTR_TBAA20]] = !{[[META15]], [[META9]], i64 112}
+// CHECK: [[ANYPTR_TBAA21]] = !{[[META15]], [[META9]], i64 200}
+// CHECK: [[INT_TBAA22]] = !{[[META15]], [[META4]], i64 192}
+// CHECK: [[INT_TBAA23]] = !{[[META15]], [[META4]], i64 196}
 //.

>From c6805abf77117ebe51920010fd1838f835d8536f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Sat, 7 Mar 2026 18:16:00 +0000
Subject: [PATCH 13/33] [X86] narrow-shl-load.ll - regenerate test checks
 (#185211)

---
 llvm/test/CodeGen/X86/narrow-shl-load.ll | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/llvm/test/CodeGen/X86/narrow-shl-load.ll b/llvm/test/CodeGen/X86/narrow-shl-load.ll
index bb9d3914e4f10..b1aa546e3f608 100644
--- a/llvm/test/CodeGen/X86/narrow-shl-load.ll
+++ b/llvm/test/CodeGen/X86/narrow-shl-load.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -7,6 +8,12 @@ target triple = "x86_64-pc-linux-gnu"
 ; rdar://8606584
 
 define void @test1() nounwind readnone {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0: # %bb.nph
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  .LBB0_1: # %while.cond
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    jmp .LBB0_1
 bb.nph:
   br label %while.cond
 
@@ -33,6 +40,12 @@ while.end:                                        ; preds = %while.cond
 ; DAG Combiner can't fold this into a load of the 1'th byte.
 ; PR8757
 define i32 @test3(ptr%P) nounwind ssp {
+; CHECK-LABEL: test3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $128, (%rdi)
+; CHECK-NEXT:    movsbl (%rdi), %eax
+; CHECK-NEXT:    movzbl %ah, %eax
+; CHECK-NEXT:    retq
   store volatile i32 128, ptr %P
   %tmp4.pre = load i32, ptr %P
   %phitmp = trunc i32 %tmp4.pre to i16
@@ -41,9 +54,4 @@ define i32 @test3(ptr%P) nounwind ssp {
   %phitmp15 = lshr i16 %phitmp14, 8
   %phitmp16 = zext i16 %phitmp15 to i32
   ret i32 %phitmp16
-  
-; CHECK: movl	$128, (%rdi)
-; CHECK-NEXT: movsbl	(%rdi), %eax
-; CHECK-NEXT: movzbl	%ah, %eax
-; CHECK-NEXT: ret
 }

>From a96a0ded25c63e2a9d5838fe50e578a3339743fd Mon Sep 17 00:00:00 2001
From: Alexey Bataev <a.bataev at outlook.com>
Date: Sat, 7 Mar 2026 10:26:42 -0800
Subject: [PATCH 14/33] [SLP]Fix the matching of the nodes with the same
 scalars, but reused

If the scalars are reused and ReuseShuffleIndices is set, we may fail to
match the buildvector/gather nodes and therefore add an extra cost.
---
 .../Transforms/Vectorize/SLPVectorizer.cpp    | 24 ++++++++++++++++---
 .../SLPVectorizer/RISCV/same-node-reused.ll   | 13 +++-------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 631af61e2cfba..8bc16972beddc 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -18224,12 +18224,19 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
   SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
   SmallDenseMap<Value *, int> UsedValuesEntry;
   SmallPtrSet<const Value *, 16> VisitedValue;
+  bool IsReusedNodeFound = false;
   auto CheckAndUseSameNode = [&](const TreeEntry *TEPtr) {
     // The node is reused - exit.
+    if (IsReusedNodeFound)
+      return false;
     if ((TEPtr->getVectorFactor() != VL.size() &&
          TEPtr->Scalars.size() != VL.size()) ||
         (!TEPtr->isSame(VL) && !TEPtr->isSame(TE->Scalars)))
       return false;
+    IsReusedNodeFound =
+        equal(TE->Scalars, TEPtr->Scalars) &&
+        equal(TE->ReorderIndices, TEPtr->ReorderIndices) &&
+        equal(TE->ReuseShuffleIndices, TEPtr->ReuseShuffleIndices);
     UsedTEs.clear();
     UsedTEs.emplace_back().insert(TEPtr);
     for (Value *V : VL) {
@@ -18424,6 +18431,8 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
         VToTEs.insert(VTE);
       }
     }
+    if (IsReusedNodeFound)
+      break;
     if (VToTEs.empty())
       continue;
     if (UsedTEs.empty()) {
@@ -18482,7 +18491,7 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
       return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars);
     });
     if (It != FirstEntries.end() &&
-        ((*It)->getVectorFactor() == VL.size() ||
+        (IsReusedNodeFound || (*It)->getVectorFactor() == VL.size() ||
          ((*It)->getVectorFactor() == TE->Scalars.size() &&
           TE->ReuseShuffleIndices.size() == VL.size() &&
           (*It)->isSame(TE->Scalars)))) {
@@ -20149,9 +20158,18 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
         // Reset the builder(s) to correctly handle perfect diamond matched
         // nodes.
         ShuffleBuilder.resetForSameNode();
-        ShuffleBuilder.add(*FrontTE, Mask);
         // Full matched entry found, no need to insert subvectors.
-        Res = ShuffleBuilder.finalize(E->getCommonMask(), {}, {});
+        if (equal(E->Scalars, FrontTE->Scalars) &&
+            equal(E->ReorderIndices, FrontTE->ReorderIndices) &&
+            equal(E->ReuseShuffleIndices, FrontTE->ReuseShuffleIndices)) {
+          Mask.resize(FrontTE->getVectorFactor());
+          std::iota(Mask.begin(), Mask.end(), 0);
+          ShuffleBuilder.add(*FrontTE, Mask);
+          Res = ShuffleBuilder.finalize({}, {}, {});
+        } else {
+          ShuffleBuilder.add(*FrontTE, Mask);
+          Res = ShuffleBuilder.finalize(E->getCommonMask(), {}, {});
+        }
         return Res;
       }
       if (!Resized) {
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll
index fb76cfcdcef5f..e007a0dae11f2 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/same-node-reused.ll
@@ -6,21 +6,14 @@ define void @test(ptr %dest, ptr %p) {
 ; CHECK-SAME: ptr [[DEST:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[INC0:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 1
-; CHECK-NEXT:    [[INC1:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 2
 ; CHECK-NEXT:    [[INC2:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 3
 ; CHECK-NEXT:    [[E1:%.*]] = load i16, ptr [[INC0]], align 2
 ; CHECK-NEXT:    [[E3:%.*]] = load i16, ptr [[INC2]], align 2
-; CHECK-NEXT:    [[INC5:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 2
-; CHECK-NEXT:    [[INC6:%.*]] = getelementptr inbounds i16, ptr [[DEST]], i64 3
 ; CHECK-NEXT:    [[TMP0:%.*]] = call <3 x i16> @llvm.masked.load.v3i16.p0(ptr align 4 [[P]], <3 x i1> <i1 true, i1 false, i1 true>, <3 x i16> poison)
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> poison, <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT:    [[E2:%.*]] = load i16, ptr [[INC1]], align 2
-; CHECK-NEXT:    [[TMP2:%.*]] = add <2 x i16> [[TMP1]], [[TMP1]]
-; CHECK-NEXT:    [[A2:%.*]] = add i16 [[E2]], [[E2]]
-; CHECK-NEXT:    [[A3:%.*]] = add i16 [[E2]], [[E2]]
-; CHECK-NEXT:    store <2 x i16> [[TMP2]], ptr [[DEST]], align 4
-; CHECK-NEXT:    store i16 [[A2]], ptr [[INC5]], align 2
-; CHECK-NEXT:    store i16 [[A3]], ptr [[INC6]], align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <3 x i16> [[TMP0]], <3 x i16> poison, <4 x i32> <i32 0, i32 2, i32 2, i32 2>
+; CHECK-NEXT:    [[TMP3:%.*]] = add <4 x i16> [[TMP2]], [[TMP2]]
+; CHECK-NEXT:    store <4 x i16> [[TMP3]], ptr [[DEST]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:

>From 0c9dfe1f7cab088ac9d3eb71ab6db5afd3e8a96f Mon Sep 17 00:00:00 2001
From: Keith Smiley <keithbsmiley at gmail.com>
Date: Sat, 7 Mar 2026 11:18:31 -0800
Subject: [PATCH 15/33] [bazel] Disable parse_headers on gtest headers
 (#185138)

One of the headers has a circular dependency issue that prevents it from
being parsed in isolation.

```
.../googletest/include/gtest/internal/custom/gtest-printers.h:53:12: error: no member named 'testing' in the global namespace
   53 |   *OS << ::testing::PrintToString(S.str());
      |          ~~^
```
---
 .../bazel/llvm-project-overlay/third-party/unittest/BUILD.bazel  | 1 +
 1 file changed, 1 insertion(+)

diff --git a/utils/bazel/llvm-project-overlay/third-party/unittest/BUILD.bazel b/utils/bazel/llvm-project-overlay/third-party/unittest/BUILD.bazel
index ff179a71ec113..1ba1f6bd5a0e7 100644
--- a/utils/bazel/llvm-project-overlay/third-party/unittest/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/third-party/unittest/BUILD.bazel
@@ -67,6 +67,7 @@ cc_library(
         "@platforms//os:windows": ["GTEST_USE_OWN_TR1_TUPLE=0"],
         "//conditions:default": ["GTEST_USE_OWN_TR1_TUPLE=1"],
     }),
+    features = ["-parse_headers"],
     includes = [
         "googletest/include",
         "include",

>From 7dd069da0fc1c70cc8abe873ca767fe92dbb3881 Mon Sep 17 00:00:00 2001
From: Keith Smiley <keithbsmiley at gmail.com>
Date: Sat, 7 Mar 2026 11:21:09 -0800
Subject: [PATCH 16/33] [bazel] Bump to latest point release (#185139)

Only minor fixes, just staying up to date.
---
 utils/bazel/.bazelversion     |  2 +-
 utils/bazel/MODULE.bazel.lock | 21 +++++++++++----------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/utils/bazel/.bazelversion b/utils/bazel/.bazelversion
index e7fdef7e2e635..acd405b1d62e3 100644
--- a/utils/bazel/.bazelversion
+++ b/utils/bazel/.bazelversion
@@ -1 +1 @@
-8.4.2
+8.6.0
diff --git a/utils/bazel/MODULE.bazel.lock b/utils/bazel/MODULE.bazel.lock
index b01e8474f70e0..038e5659baf94 100644
--- a/utils/bazel/MODULE.bazel.lock
+++ b/utils/bazel/MODULE.bazel.lock
@@ -1,5 +1,5 @@
 {
-  "lockFileVersion": 18,
+  "lockFileVersion": 24,
   "registryFileHashes": {
     "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
     "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
@@ -234,7 +234,7 @@
   "moduleExtensions": {
     "//:extensions.bzl%llvm_repos_extension": {
       "general": {
-        "bzlTransitiveDigest": "DqyjXvFpPyfsmyt58EuR0v0Xy+nPN74iTGxjOW99OcQ=",
+        "bzlTransitiveDigest": "rTcAYj9/bbplfywX07/EVPa4ovMYVYbNSlREdk0XNSI=",
         "usagesDigest": "X0yUkkWyxQ2Y5oZVDkRSE/K4YkDWo1IjhHsL+1weKyU=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -356,7 +356,7 @@
     },
     "@@pybind11_bazel+//:internal_configure.bzl%internal_configure_extension": {
       "general": {
-        "bzlTransitiveDigest": "G7xCmtNWXRuBtChRgB5OK5+gmM8Uoy8Mec/B7j3fhqs=",
+        "bzlTransitiveDigest": "NFQjcZF+fAvf5fDH+pqsx4JrfzP9PuHBz6S6ZutIbnw=",
         "usagesDigest": "D1r3lfzMuUBFxgG8V6o0bQTLMk3GkaGOaPzw53wrwyw=",
         "recordedFileInputs": {
           "@@pybind11_bazel+//MODULE.bazel": "e6f4c20442eaa7c90d7190d8dc539d0ab422f95c65a57cc59562170c58ae3d34"
@@ -386,7 +386,7 @@
     },
     "@@rules_android+//bzlmod_extensions:apksig.bzl%apksig_extension": {
       "general": {
-        "bzlTransitiveDigest": "oNxmR17oNBDtqdMtQ04f77G1dxpDmcW7KbgGb9I5vp0=",
+        "bzlTransitiveDigest": "By9qVNN7G4oL1vYOJXye7Dp/CbR2ar9oxAW8WXAVcVw=",
         "usagesDigest": "xq6OVkELeJvOgYo3oY/sUBsGFbcqdV+9BYiNgSPV/po=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -411,7 +411,7 @@
     },
     "@@rules_android+//bzlmod_extensions:com_android_dex.bzl%com_android_dex_extension": {
       "general": {
-        "bzlTransitiveDigest": "Rux2kPXUP93tJLEXkVk6q0CHomvsLAuRfqBNU8xddsY=",
+        "bzlTransitiveDigest": "rvWbJQc8jInfIAaXIMhSOqUlwM9HVeLey6q0ISvg08Y=",
         "usagesDigest": "toF8IFMu98H/VU2p1sfVC5fVXVYJunpbbmtM6tOsQXY=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -534,7 +534,7 @@
     },
     "@@rules_apple+//apple:extensions.bzl%non_module_deps": {
       "general": {
-        "bzlTransitiveDigest": "hZq9NZQ3DfMM3SejWMrPlGSZAv38GRVt6iSG5FbwbhQ=",
+        "bzlTransitiveDigest": "4xtddSlWIQdtVNVuvOI62fJfQVETHZCVWFvYYwQHMR4=",
         "usagesDigest": "M3VqFpeTCo4qmrNKGZw0dxBHvTYDrfV3cscGzlSAhQ4=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -562,7 +562,7 @@
     },
     "@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": {
       "general": {
-        "bzlTransitiveDigest": "OlvsB0HsvxbR8ZN+J9Vf00X/+WVz/Y/5Xrq2LgcVfdo=",
+        "bzlTransitiveDigest": "nvW/NrBXlAmiQw99EMGKkLaD2KbNp2mQDlxdfpr+0Ls=",
         "usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -626,7 +626,7 @@
     },
     "@@rules_python+//python/uv:uv.bzl%uv": {
       "general": {
-        "bzlTransitiveDigest": "PmZM/pIkZKEDDL68TohlKJrWPYKL5VwUw3MA7kmm6fk=",
+        "bzlTransitiveDigest": "xfNZ/WmfkC9N/pNH0cmucTOrqBa966d9iMmmX54m1UM=",
         "usagesDigest": "icnInV8HDGrRQf9x8RMfxWfBHgT3OgRlYovS/9POEJw=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -667,7 +667,7 @@
     },
     "@@rules_swift+//swift:extensions.bzl%non_module_deps": {
       "general": {
-        "bzlTransitiveDigest": "7HCu9g5L/A6rnapg3vth7ZT5JAXGhHB5cfk39qhGYuM=",
+        "bzlTransitiveDigest": "6axDCXf6fQoPav8hojnUBxGA0FAMqLvtpC1cRsisCdw=",
         "usagesDigest": "mhACFnrdMv9Wi0Mt67bxocJqviRkDSV+Ee5Mqdj5akA=",
         "recordedFileInputs": {},
         "recordedDirentsInputs": {},
@@ -824,5 +824,6 @@
         ]
       }
     }
-  }
+  },
+  "facts": {}
 }

>From f34adfebc106a2b18e59664bf10b2b0c2f81ed69 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool <compnerd at compnerd.org>
Date: Sat, 7 Mar 2026 11:33:31 -0800
Subject: [PATCH 17/33] build: add `LINKER:` modifier to `/DELAYLOAD:` options
 (#185086)

When building with the GNU driver, we would pass in `/DELAYLOAD:...`
without indicating that this is a linker flag. `clang` does not
implicitly forward non-consumed options to the linker like `cl` does, and
this would cause the build to fail.
---
 lldb/cmake/modules/AddLLDB.cmake | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lldb/cmake/modules/AddLLDB.cmake b/lldb/cmake/modules/AddLLDB.cmake
index 6493df27f38db..ea5729f07229f 100644
--- a/lldb/cmake/modules/AddLLDB.cmake
+++ b/lldb/cmake/modules/AddLLDB.cmake
@@ -171,7 +171,8 @@ function(add_lldb_executable name)
     list(FIND ARG_LINK_LIBS liblldb LIBLLDB_INDEX)
     if(NOT LIBLLDB_INDEX EQUAL -1)
       if (MSVC)
-        target_link_options(${name} PRIVATE "/DELAYLOAD:$<TARGET_FILE_NAME:liblldb>")
+        target_link_options(${name} PRIVATE
+          "LINKER:/DELAYLOAD:$<TARGET_FILE_NAME:liblldb>")
         target_link_libraries(${name} PRIVATE delayimp)
       elseif (MINGW AND LINKER_IS_LLD)
         # LLD can delay load just by passing a --delayload flag, as long as the import

>From 58efc426d7036c9736f4cbaafa55e2b42fed1e7d Mon Sep 17 00:00:00 2001
From: Nerixyz <nerixdev at outlook.de>
Date: Sat, 7 Mar 2026 20:35:25 +0100
Subject: [PATCH 18/33] [LLDB] Move Itanium language runtime to C++ language
 runtime (#169225)

In order to support the Microsoft ABI alongside the Itanium one in the
same process from different DLLs, this moves the Itanium ABI runtime
plugin to the C++ language runtime (see
https://github.com/llvm/llvm-project/pull/168941#discussion_r2547684264).

Before this PR, the C++ language runtime wasn't a plugin. Instead, its
functionality was provided by the Itanium ABI plugin.

All Itanium specific methods are moved to a new class
`ItaniumABIRuntime`. This includes resolving the dynamic type, setting
exception filters, and getting the exception object.
The other methods were added to `CPPLanguageRuntime`.

`language cplusplus demangle` moved to `CommandObjectCPlusPlus`.

The Clang REPL depended on the C++ runtime. Now that it's a plugin, this
failed the layering check. Since the REPL doesn't use the C++ runtime, I
removed the dependency.
---
 .../LanguageRuntime/CPlusPlus/CMakeLists.txt  |  11 +-
 .../CPlusPlus/CPPLanguageRuntime.cpp          | 185 +++++++++-
 .../CPlusPlus/CPPLanguageRuntime.h            |  60 +++-
 .../CPlusPlus/CommandObjectCPlusPlus.cpp      |  68 ++++
 .../CPlusPlus/CommandObjectCPlusPlus.h        |  30 ++
 .../CPlusPlus/ItaniumABI/CMakeLists.txt       |  13 -
 .../ItaniumABI/ItaniumABILanguageRuntime.h    | 127 -------
 ...guageRuntime.cpp => ItaniumABIRuntime.cpp} | 324 +++---------------
 .../CPlusPlus/ItaniumABIRuntime.h             |  64 ++++
 lldb/source/Plugins/REPL/Clang/CMakeLists.txt |   1 -
 10 files changed, 453 insertions(+), 430 deletions(-)
 create mode 100644 lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.cpp
 create mode 100644 lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.h
 delete mode 100644 lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/CMakeLists.txt
 delete mode 100644 lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h
 rename lldb/source/Plugins/LanguageRuntime/CPlusPlus/{ItaniumABI/ItaniumABILanguageRuntime.cpp => ItaniumABIRuntime.cpp} (59%)
 create mode 100644 lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.h

diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CMakeLists.txt b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CMakeLists.txt
index 727c8290bceb4..ca54601d99cff 100644
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CMakeLists.txt
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CMakeLists.txt
@@ -1,14 +1,17 @@
-add_lldb_library(lldbPluginCPPRuntime
+add_lldb_library(lldbPluginCPPRuntime PLUGIN
+  CommandObjectCPlusPlus.cpp
   CPPLanguageRuntime.cpp
+  ItaniumABIRuntime.cpp
   VerboseTrapFrameRecognizer.cpp
 
   LINK_LIBS
+    lldbBreakpoint
     lldbCore
+    lldbInterpreter
+    lldbPluginTypeSystemClang
     lldbSymbol
     lldbTarget
+    lldbValueObject
   CLANG_LIBS
     clangCodeGen
 )
-
-add_subdirectory(ItaniumABI)
-#add_subdirectory(MicrosoftABI)
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.cpp
index 913678b629f2f..75527edca8fe4 100644
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.cpp
@@ -12,6 +12,7 @@
 #include <memory>
 
 #include "CPPLanguageRuntime.h"
+#include "CommandObjectCPlusPlus.h"
 #include "VerboseTrapFrameRecognizer.h"
 
 #include "llvm/ADT/StringRef.h"
@@ -36,6 +37,8 @@
 using namespace lldb;
 using namespace lldb_private;
 
+LLDB_PLUGIN_DEFINE_ADV(CPPLanguageRuntime, CPPRuntime)
+
 static ConstString g_this = ConstString("this");
 // Artificial coroutine-related variables emitted by clang.
 static ConstString g_promise = ConstString("__promise");
@@ -107,7 +110,7 @@ class LibCXXFrameRecognizer : public StackFrameRecognizer {
 };
 
 CPPLanguageRuntime::CPPLanguageRuntime(Process *process)
-    : LanguageRuntime(process) {
+    : LanguageRuntime(process), m_itanium_runtime(process) {
   if (process) {
     process->GetTarget().GetFrameRecognizerManager().AddRecognizer(
         StackFrameRecognizerSP(new LibCXXFrameRecognizer()), {},
@@ -491,3 +494,183 @@ bool CPPLanguageRuntime::IsSymbolARuntimeThunk(const Symbol &symbol) {
   return mangled_name.starts_with("_ZTh") || mangled_name.starts_with("_ZTv") ||
          mangled_name.starts_with("_ZTc");
 }
+
+bool CPPLanguageRuntime::CouldHaveDynamicValue(ValueObject &in_value) {
+  const bool check_cxx = true;
+  const bool check_objc = false;
+  return in_value.GetCompilerType().IsPossibleDynamicType(nullptr, check_cxx,
+                                                          check_objc);
+}
+
+bool CPPLanguageRuntime::GetDynamicTypeAndAddress(
+    ValueObject &in_value, lldb::DynamicValueType use_dynamic,
+    TypeAndOrName &class_type_or_name, Address &dynamic_address,
+    Value::ValueType &value_type, llvm::ArrayRef<uint8_t> &local_buffer) {
+  class_type_or_name.Clear();
+  value_type = Value::ValueType::Scalar;
+
+  if (!CouldHaveDynamicValue(in_value))
+    return false;
+
+  return m_itanium_runtime.GetDynamicTypeAndAddress(
+      in_value, use_dynamic, class_type_or_name, dynamic_address, value_type);
+}
+
+TypeAndOrName
+CPPLanguageRuntime::FixUpDynamicType(const TypeAndOrName &type_and_or_name,
+                                     ValueObject &static_value) {
+  CompilerType static_type(static_value.GetCompilerType());
+  Flags static_type_flags(static_type.GetTypeInfo());
+
+  TypeAndOrName ret(type_and_or_name);
+  if (type_and_or_name.HasType()) {
+    // The type will always be the type of the dynamic object.  If our parent's
+    // type was a pointer, then our type should be a pointer to the type of the
+    // dynamic object.  If a reference, then the original type should be
+    // okay...
+    CompilerType orig_type = type_and_or_name.GetCompilerType();
+    CompilerType corrected_type = orig_type;
+    if (static_type_flags.AllSet(eTypeIsPointer))
+      corrected_type = orig_type.GetPointerType();
+    else if (static_type_flags.AllSet(eTypeIsReference))
+      corrected_type = orig_type.GetLValueReferenceType();
+    ret.SetCompilerType(corrected_type);
+  } else {
+    // If we are here we need to adjust our dynamic type name to include the
+    // correct & or * symbol
+    std::string corrected_name(type_and_or_name.GetName().GetCString());
+    if (static_type_flags.AllSet(eTypeIsPointer))
+      corrected_name.append(" *");
+    else if (static_type_flags.AllSet(eTypeIsReference))
+      corrected_name.append(" &");
+    // the parent type should be a correctly pointer'ed or referenc'ed type
+    ret.SetCompilerType(static_type);
+    ret.SetName(corrected_name.c_str());
+  }
+  return ret;
+}
+
+LanguageRuntime *
+CPPLanguageRuntime::CreateInstance(Process *process,
+                                   lldb::LanguageType language) {
+  if (language == eLanguageTypeC_plus_plus ||
+      language == eLanguageTypeC_plus_plus_03 ||
+      language == eLanguageTypeC_plus_plus_11 ||
+      language == eLanguageTypeC_plus_plus_14)
+    return new CPPLanguageRuntime(process);
+  else
+    return nullptr;
+}
+
+void CPPLanguageRuntime::Initialize() {
+  PluginManager::RegisterPlugin(
+      GetPluginNameStatic(), "C++ language runtime", CreateInstance,
+      [](CommandInterpreter &interpreter) -> lldb::CommandObjectSP {
+        return CommandObjectSP(new CommandObjectCPlusPlus(interpreter));
+      });
+}
+
+void CPPLanguageRuntime::Terminate() {
+  PluginManager::UnregisterPlugin(CreateInstance);
+}
+
+llvm::Expected<LanguageRuntime::VTableInfo>
+CPPLanguageRuntime::GetVTableInfo(ValueObject &in_value, bool check_type) {
+  return m_itanium_runtime.GetVTableInfo(in_value, check_type);
+}
+
+BreakpointResolverSP
+CPPLanguageRuntime::CreateExceptionResolver(const BreakpointSP &bkpt,
+                                            bool catch_bp, bool throw_bp) {
+  return CreateExceptionResolver(bkpt, catch_bp, throw_bp, false);
+}
+
+BreakpointResolverSP
+CPPLanguageRuntime::CreateExceptionResolver(const BreakpointSP &bkpt,
+                                            bool catch_bp, bool throw_bp,
+                                            bool for_expressions) {
+  std::vector<const char *> exception_names;
+  m_itanium_runtime.AppendExceptionBreakpointFunctions(
+      exception_names, catch_bp, throw_bp, for_expressions);
+
+  BreakpointResolverSP resolver_sp(new BreakpointResolverName(
+      bkpt, exception_names.data(), exception_names.size(),
+      eFunctionNameTypeBase, eLanguageTypeUnknown, 0, eLazyBoolNo));
+
+  return resolver_sp;
+}
+
+lldb::SearchFilterSP CPPLanguageRuntime::CreateExceptionSearchFilter() {
+  Target &target = m_process->GetTarget();
+
+  FileSpecList filter_modules;
+  m_itanium_runtime.AppendExceptionBreakpointFilterModules(filter_modules,
+                                                           target);
+  return target.GetSearchFilterForModuleList(&filter_modules);
+}
+
+lldb::BreakpointSP CPPLanguageRuntime::CreateExceptionBreakpoint(
+    bool catch_bp, bool throw_bp, bool for_expressions, bool is_internal) {
+  Target &target = m_process->GetTarget();
+  FileSpecList filter_modules;
+  BreakpointResolverSP exception_resolver_sp =
+      CreateExceptionResolver(nullptr, catch_bp, throw_bp, for_expressions);
+  SearchFilterSP filter_sp(CreateExceptionSearchFilter());
+  const bool hardware = false;
+  const bool resolve_indirect_functions = false;
+  return target.CreateBreakpoint(filter_sp, exception_resolver_sp, is_internal,
+                                 hardware, resolve_indirect_functions);
+}
+
+void CPPLanguageRuntime::SetExceptionBreakpoints() {
+  if (!m_process)
+    return;
+
+  const bool catch_bp = false;
+  const bool throw_bp = true;
+  const bool is_internal = true;
+  const bool for_expressions = true;
+
+  // For the exception breakpoints set by the Expression parser, we'll be a
+  // little more aggressive and stop at exception allocation as well.
+
+  if (m_cxx_exception_bp_sp) {
+    m_cxx_exception_bp_sp->SetEnabled(true);
+  } else {
+    m_cxx_exception_bp_sp = CreateExceptionBreakpoint(
+        catch_bp, throw_bp, for_expressions, is_internal);
+    if (m_cxx_exception_bp_sp)
+      m_cxx_exception_bp_sp->SetBreakpointKind("c++ exception");
+  }
+}
+
+void CPPLanguageRuntime::ClearExceptionBreakpoints() {
+  if (!m_process)
+    return;
+
+  if (m_cxx_exception_bp_sp) {
+    m_cxx_exception_bp_sp->SetEnabled(false);
+  }
+}
+
+bool CPPLanguageRuntime::ExceptionBreakpointsAreSet() {
+  return m_cxx_exception_bp_sp && m_cxx_exception_bp_sp->IsEnabled();
+}
+
+bool CPPLanguageRuntime::ExceptionBreakpointsExplainStop(
+    lldb::StopInfoSP stop_reason) {
+  if (!m_process)
+    return false;
+
+  if (!stop_reason || stop_reason->GetStopReason() != eStopReasonBreakpoint)
+    return false;
+
+  uint64_t break_site_id = stop_reason->GetValue();
+  return m_process->GetBreakpointSiteList().StopPointSiteContainsBreakpoint(
+      break_site_id, m_cxx_exception_bp_sp->GetID());
+}
+
+lldb::ValueObjectSP
+CPPLanguageRuntime::GetExceptionObjectForThread(lldb::ThreadSP thread_sp) {
+  return m_itanium_runtime.GetExceptionObjectForThread(std::move(thread_sp));
+}
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h
index 05639e9798917..7c3dade76d703 100644
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h
@@ -13,6 +13,7 @@
 
 #include "llvm/ADT/StringMap.h"
 
+#include "ItaniumABIRuntime.h"
 #include "lldb/Core/PluginInterface.h"
 #include "lldb/Target/LanguageRuntime.h"
 #include "lldb/lldb-private.h"
@@ -42,6 +43,19 @@ class CPPLanguageRuntime : public LanguageRuntime {
 
   static char ID;
 
+  static void Initialize();
+
+  static void Terminate();
+
+  static lldb_private::LanguageRuntime *
+  CreateInstance(Process *process, lldb::LanguageType language);
+
+  static llvm::StringRef GetPluginNameStatic() {
+    return "cpp-language-runtime";
+  }
+
+  llvm::StringRef GetPluginName() override { return GetPluginNameStatic(); }
+
   bool isA(const void *ClassID) const override {
     return ClassID == &ID || LanguageRuntime::isA(ClassID);
   }
@@ -81,15 +95,55 @@ class CPPLanguageRuntime : public LanguageRuntime {
 
   bool IsSymbolARuntimeThunk(const Symbol &symbol) override;
 
-protected:
-  // Classes that inherit from CPPLanguageRuntime can see and modify these
-  CPPLanguageRuntime(Process *process);
+  llvm::Expected<LanguageRuntime::VTableInfo>
+  GetVTableInfo(ValueObject &in_value, bool check_type) override;
+
+  bool GetDynamicTypeAndAddress(ValueObject &in_value,
+                                lldb::DynamicValueType use_dynamic,
+                                TypeAndOrName &class_type_or_name,
+                                Address &address, Value::ValueType &value_type,
+                                llvm::ArrayRef<uint8_t> &local_buffer) override;
+
+  TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name,
+                                 ValueObject &static_value) override;
+
+  bool CouldHaveDynamicValue(ValueObject &in_value) override;
+
+  void SetExceptionBreakpoints() override;
+
+  void ClearExceptionBreakpoints() override;
+
+  bool ExceptionBreakpointsAreSet() override;
+
+  bool ExceptionBreakpointsExplainStop(lldb::StopInfoSP stop_reason) override;
+
+  lldb::BreakpointResolverSP
+  CreateExceptionResolver(const lldb::BreakpointSP &bkpt, bool catch_bp,
+                          bool throw_bp) override;
+
+  lldb::SearchFilterSP CreateExceptionSearchFilter() override;
+
+  lldb::ValueObjectSP
+  GetExceptionObjectForThread(lldb::ThreadSP thread_sp) override;
 
 private:
+  CPPLanguageRuntime(Process *process);
+
+  lldb::BreakpointResolverSP
+  CreateExceptionResolver(const lldb::BreakpointSP &bkpt, bool catch_bp,
+                          bool throw_bp, bool for_expressions);
+
+  lldb::BreakpointSP CreateExceptionBreakpoint(bool catch_bp, bool throw_bp,
+                                               bool for_expressions,
+                                               bool is_internal);
+
   using OperatorStringToCallableInfoMap =
     llvm::StringMap<CPPLanguageRuntime::LibCppStdFunctionCallableInfo>;
 
   OperatorStringToCallableInfoMap CallableLookupCache;
+
+  lldb::BreakpointSP m_cxx_exception_bp_sp;
+  ItaniumABIRuntime m_itanium_runtime;
 };
 
 } // namespace lldb_private
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.cpp b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.cpp
new file mode 100644
index 0000000000000..0a979e9c321ee
--- /dev/null
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.cpp
@@ -0,0 +1,68 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CommandObjectCPlusPlus.h"
+
+#include "lldb/Core/Mangled.h"
+#include "lldb/Interpreter/CommandReturnObject.h"
+
+using namespace lldb;
+using namespace lldb_private;
+
+CommandObjectCPlusPlusDemangle::CommandObjectCPlusPlusDemangle(
+    CommandInterpreter &interpreter)
+    : CommandObjectParsed(interpreter, "demangle",
+                          "Demangle a C++ mangled name.",
+                          "language cplusplus demangle [<mangled-name> ...]") {
+  AddSimpleArgumentList(eArgTypeSymbol, eArgRepeatPlus);
+}
+
+void CommandObjectCPlusPlusDemangle::DoExecute(Args &command,
+                                               CommandReturnObject &result) {
+  bool demangled_any = false;
+  bool error_any = false;
+  for (auto &entry : command.entries()) {
+    if (entry.ref().empty())
+      continue;
+
+    // the actual Mangled class should be strict about this, but on the
+    // command line if you're copying mangled names out of 'nm' on Darwin,
+    // they will come out with an extra underscore - be willing to strip this
+    // on behalf of the user.   This is the moral equivalent of the -_/-n
+    // options to c++filt
+    auto name = entry.ref();
+    if (name.starts_with("__Z"))
+      name = name.drop_front();
+
+    Mangled mangled(name);
+    if (mangled.GuessLanguage() == lldb::eLanguageTypeC_plus_plus) {
+      ConstString demangled(mangled.GetDisplayDemangledName());
+      demangled_any = true;
+      result.AppendMessageWithFormatv("{0} ---> {1}", entry.c_str(),
+                                      demangled.GetCString());
+    } else {
+      error_any = true;
+      result.AppendErrorWithFormat("%s is not a valid C++ mangled name\n",
+                                   entry.ref().str().c_str());
+    }
+  }
+
+  result.SetStatus(
+      error_any ? lldb::eReturnStatusFailed
+                : (demangled_any ? lldb::eReturnStatusSuccessFinishResult
+                                 : lldb::eReturnStatusSuccessFinishNoResult));
+}
+
+CommandObjectCPlusPlus::CommandObjectCPlusPlus(CommandInterpreter &interpreter)
+    : CommandObjectMultiword(
+          interpreter, "cplusplus",
+          "Commands for operating on the C++ language runtime.",
+          "cplusplus <subcommand> [<subcommand-options>]") {
+  LoadSubCommand("demangle", CommandObjectSP(new CommandObjectCPlusPlusDemangle(
+                                 interpreter)));
+}
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.h b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.h
new file mode 100644
index 0000000000000..f95bf7ae85389
--- /dev/null
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/CommandObjectCPlusPlus.h
@@ -0,0 +1,30 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_COMMANDOBJECTCPLUSPLUS_H
+#define LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_COMMANDOBJECTCPLUSPLUS_H
+
+#include "lldb/Interpreter/CommandObjectMultiword.h"
+namespace lldb_private {
+
+class CommandObjectCPlusPlusDemangle : public CommandObjectParsed {
+public:
+  CommandObjectCPlusPlusDemangle(CommandInterpreter &interpreter);
+
+protected:
+  void DoExecute(Args &command, CommandReturnObject &result) override;
+};
+
+class CommandObjectCPlusPlus : public CommandObjectMultiword {
+public:
+  CommandObjectCPlusPlus(CommandInterpreter &interpreter);
+};
+
+} // namespace lldb_private
+
+#endif
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/CMakeLists.txt b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/CMakeLists.txt
deleted file mode 100644
index a5406c73be933..0000000000000
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-add_lldb_library(lldbPluginCXXItaniumABI PLUGIN
-  ItaniumABILanguageRuntime.cpp
-
-  LINK_LIBS
-    lldbBreakpoint
-    lldbCore
-    lldbInterpreter
-    lldbSymbol
-    lldbTarget
-    lldbValueObject
-    lldbPluginCPPRuntime
-    lldbPluginTypeSystemClang
-  )
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h
deleted file mode 100644
index 7abf2f8547cd5..0000000000000
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//===-- ItaniumABILanguageRuntime.h -----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_ITANIUMABI_ITANIUMABILANGUAGERUNTIME_H
-#define LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_ITANIUMABI_ITANIUMABILANGUAGERUNTIME_H
-
-#include <map>
-#include <mutex>
-#include <vector>
-
-#include "lldb/Breakpoint/BreakpointResolver.h"
-#include "lldb/Core/Value.h"
-#include "lldb/Symbol/Type.h"
-#include "lldb/Target/LanguageRuntime.h"
-#include "lldb/lldb-private.h"
-
-#include "Plugins/LanguageRuntime/CPlusPlus/CPPLanguageRuntime.h"
-
-namespace lldb_private {
-
-class ItaniumABILanguageRuntime : public lldb_private::CPPLanguageRuntime {
-public:
-  ~ItaniumABILanguageRuntime() override = default;
-
-  // Static Functions
-  static void Initialize();
-
-  static void Terminate();
-
-  static lldb_private::LanguageRuntime *
-  CreateInstance(Process *process, lldb::LanguageType language);
-
-  static llvm::StringRef GetPluginNameStatic() { return "itanium"; }
-
-  static char ID;
-
-  bool isA(const void *ClassID) const override {
-    return ClassID == &ID || CPPLanguageRuntime::isA(ClassID);
-  }
-
-  static bool classof(const LanguageRuntime *runtime) {
-    return runtime->isA(&ID);
-  }
-
-
-  llvm::Expected<LanguageRuntime::VTableInfo>
-  GetVTableInfo(ValueObject &in_value, bool check_type) override;
-
-  bool GetDynamicTypeAndAddress(ValueObject &in_value,
-                                lldb::DynamicValueType use_dynamic,
-                                TypeAndOrName &class_type_or_name,
-                                Address &address, Value::ValueType &value_type,
-                                llvm::ArrayRef<uint8_t> &local_buffer) override;
-
-  TypeAndOrName FixUpDynamicType(const TypeAndOrName &type_and_or_name,
-                                 ValueObject &static_value) override;
-
-  bool CouldHaveDynamicValue(ValueObject &in_value) override;
-
-  void SetExceptionBreakpoints() override;
-
-  void ClearExceptionBreakpoints() override;
-
-  bool ExceptionBreakpointsAreSet() override;
-
-  bool ExceptionBreakpointsExplainStop(lldb::StopInfoSP stop_reason) override;
-
-  lldb::BreakpointResolverSP
-  CreateExceptionResolver(const lldb::BreakpointSP &bkpt,
-                          bool catch_bp, bool throw_bp) override;
-
-  lldb::SearchFilterSP CreateExceptionSearchFilter() override;
-
-  lldb::ValueObjectSP GetExceptionObjectForThread(
-      lldb::ThreadSP thread_sp) override;
-
-  // PluginInterface protocol
-  llvm::StringRef GetPluginName() override { return GetPluginNameStatic(); }
-
-protected:
-  lldb::BreakpointResolverSP
-  CreateExceptionResolver(const lldb::BreakpointSP &bkpt,
-                          bool catch_bp, bool throw_bp, bool for_expressions);
-
-  lldb::BreakpointSP CreateExceptionBreakpoint(bool catch_bp, bool throw_bp,
-                                               bool for_expressions,
-                                               bool is_internal);
-
-private:
-  typedef std::map<lldb_private::Address, TypeAndOrName> DynamicTypeCache;
-  typedef std::map<lldb_private::Address, VTableInfo> VTableInfoCache;
-
-  ItaniumABILanguageRuntime(Process *process)
-      : // Call CreateInstance instead.
-        lldb_private::CPPLanguageRuntime(process) {}
-
-  lldb::BreakpointSP m_cxx_exception_bp_sp;
-  DynamicTypeCache m_dynamic_type_map;
-  VTableInfoCache m_vtable_info_map;
-  std::mutex m_mutex;
-
-  TypeAndOrName GetTypeInfo(ValueObject &in_value,
-                            const VTableInfo &vtable_info);
-
-  TypeAndOrName GetDynamicTypeInfo(const lldb_private::Address &vtable_addr);
-
-  void SetDynamicTypeInfo(const lldb_private::Address &vtable_addr,
-                          const TypeAndOrName &type_info);
-
-  // Check if a compiler type has a vtable.
-  //
-  // If the compiler type is a pointer or a reference, this function will check
-  // if the pointee type has a vtable, else it will check the type passed in.
-  //
-  // Returns an error if the type of the value doesn't have a vtable with an
-  // explanation why, or returns an Error::success() if the type has a vtable.
-  llvm::Error TypeHasVTable(CompilerType compiler_type);
-};
-
-} // namespace lldb_private
-
-#endif // LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_ITANIUMABI_ITANIUMABILANGUAGERUNTIME_H
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.cpp
similarity index 59%
rename from lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp
rename to lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.cpp
index e503be5a3b697..4259b100e09f7 100644
--- a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABI/ItaniumABILanguageRuntime.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.cpp
@@ -1,4 +1,4 @@
-//===-- ItaniumABILanguageRuntime.cpp -------------------------------------===//
+//===----------------------------------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,56 +6,24 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "ItaniumABILanguageRuntime.h"
+#include "ItaniumABIRuntime.h"
 
 #include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
-#include "lldb/Breakpoint/BreakpointLocation.h"
-#include "lldb/Core/Mangled.h"
-#include "lldb/Core/Module.h"
-#include "lldb/Core/PluginManager.h"
 #include "lldb/DataFormatters/FormattersHelpers.h"
 #include "lldb/Expression/DiagnosticManager.h"
 #include "lldb/Expression/FunctionCaller.h"
-#include "lldb/Interpreter/CommandObject.h"
-#include "lldb/Interpreter/CommandObjectMultiword.h"
-#include "lldb/Interpreter/CommandReturnObject.h"
-#include "lldb/Symbol/Symbol.h"
-#include "lldb/Symbol/SymbolFile.h"
-#include "lldb/Symbol/TypeList.h"
-#include "lldb/Target/Process.h"
-#include "lldb/Target/RegisterContext.h"
-#include "lldb/Target/SectionLoadList.h"
-#include "lldb/Target/StopInfo.h"
-#include "lldb/Target/Target.h"
-#include "lldb/Target/Thread.h"
-#include "lldb/Utility/ConstString.h"
 #include "lldb/Utility/LLDBLog.h"
-#include "lldb/Utility/Log.h"
-#include "lldb/Utility/Scalar.h"
-#include "lldb/Utility/Status.h"
-#include "lldb/ValueObject/ValueObject.h"
-#include "lldb/ValueObject/ValueObjectMemory.h"
-
-#include <vector>
 
 using namespace lldb;
 using namespace lldb_private;
 
-LLDB_PLUGIN_DEFINE_ADV(ItaniumABILanguageRuntime, CXXItaniumABI)
-
 static const char *vtable_demangled_prefix = "vtable for ";
 
-char ItaniumABILanguageRuntime::ID = 0;
+ItaniumABIRuntime::ItaniumABIRuntime(Process *process) : m_process(process) {}
 
-bool ItaniumABILanguageRuntime::CouldHaveDynamicValue(ValueObject &in_value) {
-  const bool check_cxx = true;
-  const bool check_objc = false;
-  return in_value.GetCompilerType().IsPossibleDynamicType(nullptr, check_cxx,
-                                                          check_objc);
-}
-
-TypeAndOrName ItaniumABILanguageRuntime::GetTypeInfo(
-    ValueObject &in_value, const VTableInfo &vtable_info) {
+TypeAndOrName
+ItaniumABIRuntime::GetTypeInfo(ValueObject &in_value,
+                               const LanguageRuntime::VTableInfo &vtable_info) {
   if (vtable_info.addr.IsSectionOffset()) {
     // See if we have cached info for this type already
     TypeAndOrName type_info = GetDynamicTypeInfo(vtable_info.addr);
@@ -179,7 +147,7 @@ TypeAndOrName ItaniumABILanguageRuntime::GetTypeInfo(
   return TypeAndOrName();
 }
 
-llvm::Error ItaniumABILanguageRuntime::TypeHasVTable(CompilerType type) {
+llvm::Error ItaniumABIRuntime::TypeHasVTable(CompilerType type) {
   // Check to make sure the class has a vtable.
   CompilerType original_type = type;
   if (type.IsPointerOrReferenceType()) {
@@ -191,7 +159,8 @@ llvm::Error ItaniumABILanguageRuntime::TypeHasVTable(CompilerType type) {
   // Make sure this is a class or a struct first by checking the type class
   // bitfield that gets returned.
   if ((type.GetTypeClass() & (eTypeClassStruct | eTypeClassClass)) == 0) {
-    return llvm::createStringError(std::errc::invalid_argument,
+    return llvm::createStringError(
+        std::errc::invalid_argument,
         "type \"%s\" is not a class or struct or a pointer to one",
         original_type.GetTypeName().AsCString("<invalid>"));
   }
@@ -199,8 +168,8 @@ llvm::Error ItaniumABILanguageRuntime::TypeHasVTable(CompilerType type) {
   // Check if the type has virtual functions by asking it if it is polymorphic.
   if (!type.IsPolymorphicClass()) {
     return llvm::createStringError(std::errc::invalid_argument,
-        "type \"%s\" doesn't have a vtable",
-        type.GetTypeName().AsCString("<invalid>"));
+                                   "type \"%s\" doesn't have a vtable",
+                                   type.GetTypeName().AsCString("<invalid>"));
   }
   return llvm::Error::success();
 }
@@ -213,8 +182,7 @@ llvm::Error ItaniumABILanguageRuntime::TypeHasVTable(CompilerType type) {
 // and is can pass in instances of classes which is not suitable for dynamic
 // type detection, these cases should pass true for \a check_type.
 llvm::Expected<LanguageRuntime::VTableInfo>
- ItaniumABILanguageRuntime::GetVTableInfo(ValueObject &in_value,
-                                          bool check_type) {
+ItaniumABIRuntime::GetVTableInfo(ValueObject &in_value, bool check_type) {
 
   CompilerType type = in_value.GetCompilerType();
   if (check_type) {
@@ -240,7 +208,8 @@ llvm::Expected<LanguageRuntime::VTableInfo>
       process->ReadPointerFromMemory(original_ptr, error);
 
   if (!error.Success() || vtable_load_addr == LLDB_INVALID_ADDRESS)
-    return llvm::createStringError(std::errc::invalid_argument,
+    return llvm::createStringError(
+        std::errc::invalid_argument,
         "failed to read vtable pointer from memory at 0x%" PRIx64,
         original_ptr);
 
@@ -252,8 +221,9 @@ llvm::Expected<LanguageRuntime::VTableInfo>
   Address vtable_addr;
   if (!process->GetTarget().ResolveLoadAddress(vtable_load_addr, vtable_addr))
     return llvm::createStringError(std::errc::invalid_argument,
-                                   "failed to resolve vtable pointer 0x%"
-                                   PRIx64 "to a section", vtable_load_addr);
+                                   "failed to resolve vtable pointer 0x%" PRIx64
+                                   "to a section",
+                                   vtable_load_addr);
 
   // Check our cache first to see if we already have this info
   {
@@ -270,20 +240,21 @@ llvm::Expected<LanguageRuntime::VTableInfo>
                                    vtable_load_addr);
   llvm::StringRef name = symbol->GetMangled().GetDemangledName().GetStringRef();
   if (name.starts_with(vtable_demangled_prefix)) {
-    VTableInfo info = {vtable_addr, symbol};
+    LanguageRuntime::VTableInfo info = {vtable_addr, symbol};
     std::lock_guard<std::mutex> locker(m_mutex);
     auto pos = m_vtable_info_map[vtable_addr] = info;
     return info;
   }
   return llvm::createStringError(std::errc::invalid_argument,
-      "symbol found that contains 0x%" PRIx64 " is not a vtable symbol",
-      vtable_load_addr);
+                                 "symbol found that contains 0x%" PRIx64
+                                 " is not a vtable symbol",
+                                 vtable_load_addr);
 }
 
-bool ItaniumABILanguageRuntime::GetDynamicTypeAndAddress(
+bool ItaniumABIRuntime::GetDynamicTypeAndAddress(
     ValueObject &in_value, lldb::DynamicValueType use_dynamic,
     TypeAndOrName &class_type_or_name, Address &dynamic_address,
-    Value::ValueType &value_type, llvm::ArrayRef<uint8_t> &local_buffer) {
+    Value::ValueType &value_type) {
   // For Itanium, if the type has a vtable pointer in the object, it will be at
   // offset 0 in the object.  That will point to the "address point" within the
   // vtable (not the beginning of the vtable.)  We can then look up the symbol
@@ -291,27 +262,20 @@ bool ItaniumABILanguageRuntime::GetDynamicTypeAndAddress(
   // contain the full class name. The second pointer above the "address point"
   // is the "offset_to_top".  We'll use that to get the start of the value
   // object which holds the dynamic type.
-  //
-
-  class_type_or_name.Clear();
-  value_type = Value::ValueType::Scalar;
-
-  if (!CouldHaveDynamicValue(in_value))
-    return false;
 
   // Check if we have a vtable pointer in this value. If we don't it will
   // return an error, else it will return a valid resolved address. We don't
   // want GetVTableInfo to check the type since we accept void * as a possible
   // dynamic type and that won't pass the type check. We already checked the
   // type above in CouldHaveDynamicValue(...).
-  llvm::Expected<VTableInfo> vtable_info_or_err =
+  llvm::Expected<LanguageRuntime::VTableInfo> vtable_info_or_err =
       GetVTableInfo(in_value, /*check_type=*/false);
   if (!vtable_info_or_err) {
     llvm::consumeError(vtable_info_or_err.takeError());
     return false;
   }
 
-  const VTableInfo &vtable_info = vtable_info_or_err.get();
+  const LanguageRuntime::VTableInfo &vtable_info = vtable_info_or_err.get();
   class_type_or_name = GetTypeInfo(in_value, vtable_info);
 
   if (!class_type_or_name)
@@ -353,146 +317,15 @@ bool ItaniumABILanguageRuntime::GetDynamicTypeAndAddress(
   // the original address.
   lldb::addr_t dynamic_addr =
       in_value.GetPointerValue().address + offset_to_top;
-  if (!m_process->GetTarget().ResolveLoadAddress(
-          dynamic_addr, dynamic_address)) {
+  if (!m_process->GetTarget().ResolveLoadAddress(dynamic_addr,
+                                                 dynamic_address)) {
     dynamic_address.SetRawAddress(dynamic_addr);
   }
   return true;
 }
 
-TypeAndOrName ItaniumABILanguageRuntime::FixUpDynamicType(
-    const TypeAndOrName &type_and_or_name, ValueObject &static_value) {
-  CompilerType static_type(static_value.GetCompilerType());
-  Flags static_type_flags(static_type.GetTypeInfo());
-
-  TypeAndOrName ret(type_and_or_name);
-  if (type_and_or_name.HasType()) {
-    // The type will always be the type of the dynamic object.  If our parent's
-    // type was a pointer, then our type should be a pointer to the type of the
-    // dynamic object.  If a reference, then the original type should be
-    // okay...
-    CompilerType orig_type = type_and_or_name.GetCompilerType();
-    CompilerType corrected_type = orig_type;
-    if (static_type_flags.AllSet(eTypeIsPointer))
-      corrected_type = orig_type.GetPointerType();
-    else if (static_type_flags.AllSet(eTypeIsReference))
-      corrected_type = orig_type.GetLValueReferenceType();
-    ret.SetCompilerType(corrected_type);
-  } else {
-    // If we are here we need to adjust our dynamic type name to include the
-    // correct & or * symbol
-    std::string corrected_name(type_and_or_name.GetName().GetCString());
-    if (static_type_flags.AllSet(eTypeIsPointer))
-      corrected_name.append(" *");
-    else if (static_type_flags.AllSet(eTypeIsReference))
-      corrected_name.append(" &");
-    // the parent type should be a correctly pointer'ed or referenc'ed type
-    ret.SetCompilerType(static_type);
-    ret.SetName(corrected_name.c_str());
-  }
-  return ret;
-}
-
-// Static Functions
-LanguageRuntime *
-ItaniumABILanguageRuntime::CreateInstance(Process *process,
-                                          lldb::LanguageType language) {
-  // FIXME: We have to check the process and make sure we actually know that
-  // this process supports
-  // the Itanium ABI.
-  if (language == eLanguageTypeC_plus_plus ||
-      language == eLanguageTypeC_plus_plus_03 ||
-      language == eLanguageTypeC_plus_plus_11 ||
-      language == eLanguageTypeC_plus_plus_14)
-    return new ItaniumABILanguageRuntime(process);
-  else
-    return nullptr;
-}
-
-class CommandObjectMultiwordItaniumABI_Demangle : public CommandObjectParsed {
-public:
-  CommandObjectMultiwordItaniumABI_Demangle(CommandInterpreter &interpreter)
-      : CommandObjectParsed(
-            interpreter, "demangle", "Demangle a C++ mangled name.",
-            "language cplusplus demangle [<mangled-name> ...]") {
-    AddSimpleArgumentList(eArgTypeSymbol, eArgRepeatPlus);
-  }
-
-  ~CommandObjectMultiwordItaniumABI_Demangle() override = default;
-
-protected:
-  void DoExecute(Args &command, CommandReturnObject &result) override {
-    bool demangled_any = false;
-    bool error_any = false;
-    for (auto &entry : command.entries()) {
-      if (entry.ref().empty())
-        continue;
-
-      // the actual Mangled class should be strict about this, but on the
-      // command line if you're copying mangled names out of 'nm' on Darwin,
-      // they will come out with an extra underscore - be willing to strip this
-      // on behalf of the user.   This is the moral equivalent of the -_/-n
-      // options to c++filt
-      auto name = entry.ref();
-      if (name.starts_with("__Z"))
-        name = name.drop_front();
-
-      Mangled mangled(name);
-      if (mangled.GuessLanguage() == lldb::eLanguageTypeC_plus_plus) {
-        ConstString demangled(mangled.GetDisplayDemangledName());
-        demangled_any = true;
-        result.AppendMessageWithFormatv("{0} ---> {1}", entry.c_str(),
-                                        demangled.GetCString());
-      } else {
-        error_any = true;
-        result.AppendErrorWithFormat("%s is not a valid C++ mangled name\n",
-                                     entry.ref().str().c_str());
-      }
-    }
-
-    result.SetStatus(
-        error_any ? lldb::eReturnStatusFailed
-                  : (demangled_any ? lldb::eReturnStatusSuccessFinishResult
-                                   : lldb::eReturnStatusSuccessFinishNoResult));
-  }
-};
-
-class CommandObjectMultiwordItaniumABI : public CommandObjectMultiword {
-public:
-  CommandObjectMultiwordItaniumABI(CommandInterpreter &interpreter)
-      : CommandObjectMultiword(
-            interpreter, "cplusplus",
-            "Commands for operating on the C++ language runtime.",
-            "cplusplus <subcommand> [<subcommand-options>]") {
-    LoadSubCommand(
-        "demangle",
-        CommandObjectSP(
-            new CommandObjectMultiwordItaniumABI_Demangle(interpreter)));
-  }
-
-  ~CommandObjectMultiwordItaniumABI() override = default;
-};
-
-void ItaniumABILanguageRuntime::Initialize() {
-  PluginManager::RegisterPlugin(
-      GetPluginNameStatic(), "Itanium ABI for the C++ language", CreateInstance,
-      [](CommandInterpreter &interpreter) -> lldb::CommandObjectSP {
-        return CommandObjectSP(
-            new CommandObjectMultiwordItaniumABI(interpreter));
-      });
-}
-
-void ItaniumABILanguageRuntime::Terminate() {
-  PluginManager::UnregisterPlugin(CreateInstance);
-}
-
-BreakpointResolverSP ItaniumABILanguageRuntime::CreateExceptionResolver(
-    const BreakpointSP &bkpt, bool catch_bp, bool throw_bp) {
-  return CreateExceptionResolver(bkpt, catch_bp, throw_bp, false);
-}
-
-BreakpointResolverSP ItaniumABILanguageRuntime::CreateExceptionResolver(
-    const BreakpointSP &bkpt, bool catch_bp, bool throw_bp,
+void ItaniumABIRuntime::AppendExceptionBreakpointFunctions(
+    std::vector<const char *> &names, bool catch_bp, bool throw_bp,
     bool for_expressions) {
   // One complication here is that most users DON'T want to stop at
   // __cxa_allocate_expression, but until we can do anything better with
@@ -505,30 +338,21 @@ BreakpointResolverSP ItaniumABILanguageRuntime::CreateExceptionResolver(
   static const char *g_throw_name1 = "__cxa_throw";
   static const char *g_throw_name2 = "__cxa_rethrow";
   static const char *g_exception_throw_name = "__cxa_allocate_exception";
-  std::vector<const char *> exception_names;
-  exception_names.reserve(4);
+
   if (catch_bp)
-    exception_names.push_back(g_catch_name);
+    names.push_back(g_catch_name);
 
   if (throw_bp) {
-    exception_names.push_back(g_throw_name1);
-    exception_names.push_back(g_throw_name2);
+    names.push_back(g_throw_name1);
+    names.push_back(g_throw_name2);
   }
 
   if (for_expressions)
-    exception_names.push_back(g_exception_throw_name);
-
-  BreakpointResolverSP resolver_sp(new BreakpointResolverName(
-      bkpt, exception_names.data(), exception_names.size(),
-      eFunctionNameTypeBase, eLanguageTypeUnknown, 0, eLazyBoolNo));
-
-  return resolver_sp;
+    names.push_back(g_exception_throw_name);
 }
 
-lldb::SearchFilterSP ItaniumABILanguageRuntime::CreateExceptionSearchFilter() {
-  Target &target = m_process->GetTarget();
-
-  FileSpecList filter_modules;
+void ItaniumABIRuntime::AppendExceptionBreakpointFilterModules(
+    FileSpecList &filter_modules, const Target &target) {
   if (target.GetArchitecture().GetTriple().getVendor() == llvm::Triple::Apple) {
     // Limit the number of modules that are searched for these breakpoints for
     // Apple binaries.
@@ -537,72 +361,10 @@ lldb::SearchFilterSP ItaniumABILanguageRuntime::CreateExceptionSearchFilter() {
     filter_modules.EmplaceBack("libc++abi.1.0.dylib");
     filter_modules.EmplaceBack("libc++abi.1.dylib");
   }
-  return target.GetSearchFilterForModuleList(&filter_modules);
-}
-
-lldb::BreakpointSP ItaniumABILanguageRuntime::CreateExceptionBreakpoint(
-    bool catch_bp, bool throw_bp, bool for_expressions, bool is_internal) {
-  Target &target = m_process->GetTarget();
-  FileSpecList filter_modules;
-  BreakpointResolverSP exception_resolver_sp =
-      CreateExceptionResolver(nullptr, catch_bp, throw_bp, for_expressions);
-  SearchFilterSP filter_sp(CreateExceptionSearchFilter());
-  const bool hardware = false;
-  const bool resolve_indirect_functions = false;
-  return target.CreateBreakpoint(filter_sp, exception_resolver_sp, is_internal,
-                                 hardware, resolve_indirect_functions);
-}
-
-void ItaniumABILanguageRuntime::SetExceptionBreakpoints() {
-  if (!m_process)
-    return;
-
-  const bool catch_bp = false;
-  const bool throw_bp = true;
-  const bool is_internal = true;
-  const bool for_expressions = true;
-
-  // For the exception breakpoints set by the Expression parser, we'll be a
-  // little more aggressive and stop at exception allocation as well.
-
-  if (m_cxx_exception_bp_sp) {
-    m_cxx_exception_bp_sp->SetEnabled(true);
-  } else {
-    m_cxx_exception_bp_sp = CreateExceptionBreakpoint(
-        catch_bp, throw_bp, for_expressions, is_internal);
-    if (m_cxx_exception_bp_sp)
-      m_cxx_exception_bp_sp->SetBreakpointKind("c++ exception");
-  }
-}
-
-void ItaniumABILanguageRuntime::ClearExceptionBreakpoints() {
-  if (!m_process)
-    return;
-
-  if (m_cxx_exception_bp_sp) {
-    m_cxx_exception_bp_sp->SetEnabled(false);
-  }
-}
-
-bool ItaniumABILanguageRuntime::ExceptionBreakpointsAreSet() {
-  return m_cxx_exception_bp_sp && m_cxx_exception_bp_sp->IsEnabled();
-}
-
-bool ItaniumABILanguageRuntime::ExceptionBreakpointsExplainStop(
-    lldb::StopInfoSP stop_reason) {
-  if (!m_process)
-    return false;
-
-  if (!stop_reason || stop_reason->GetStopReason() != eStopReasonBreakpoint)
-    return false;
-
-  uint64_t break_site_id = stop_reason->GetValue();
-  return m_process->GetBreakpointSiteList().StopPointSiteContainsBreakpoint(
-      break_site_id, m_cxx_exception_bp_sp->GetID());
 }
 
-ValueObjectSP ItaniumABILanguageRuntime::GetExceptionObjectForThread(
-    ThreadSP thread_sp) {
+ValueObjectSP
+ItaniumABIRuntime::GetExceptionObjectForThread(ThreadSP thread_sp) {
   if (!thread_sp->SafeToCallFunctions())
     return {};
 
@@ -664,16 +426,16 @@ ValueObjectSP ItaniumABILanguageRuntime::GetExceptionObjectForThread(
   ValueObjectSP exception = ValueObject::CreateValueObjectFromData(
       "exception", exception_isw.GetAsData(m_process->GetByteOrder()), exe_ctx,
       voidstar);
-  ValueObjectSP dyn_exception
-      = exception->GetDynamicValue(eDynamicDontRunTarget);
+  ValueObjectSP dyn_exception =
+      exception->GetDynamicValue(eDynamicDontRunTarget);
   // If we succeed in making a dynamic value, return that:
   if (dyn_exception)
-     return dyn_exception;
+    return dyn_exception;
 
   return exception;
 }
 
-TypeAndOrName ItaniumABILanguageRuntime::GetDynamicTypeInfo(
+TypeAndOrName ItaniumABIRuntime::GetDynamicTypeInfo(
     const lldb_private::Address &vtable_addr) {
   std::lock_guard<std::mutex> locker(m_mutex);
   DynamicTypeCache::const_iterator pos = m_dynamic_type_map.find(vtable_addr);
@@ -683,7 +445,7 @@ TypeAndOrName ItaniumABILanguageRuntime::GetDynamicTypeInfo(
     return pos->second;
 }
 
-void ItaniumABILanguageRuntime::SetDynamicTypeInfo(
+void ItaniumABIRuntime::SetDynamicTypeInfo(
     const lldb_private::Address &vtable_addr, const TypeAndOrName &type_info) {
   std::lock_guard<std::mutex> locker(m_mutex);
   m_dynamic_type_map[vtable_addr] = type_info;
diff --git a/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.h b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.h
new file mode 100644
index 0000000000000..75a8142b26d93
--- /dev/null
+++ b/lldb/source/Plugins/LanguageRuntime/CPlusPlus/ItaniumABIRuntime.h
@@ -0,0 +1,64 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_ITANIUMABIRUNTIME_H
+#define LLDB_SOURCE_PLUGINS_LANGUAGERUNTIME_CPLUSPLUS_ITANIUMABIRUNTIME_H
+
+#include "lldb/Target/LanguageRuntime.h"
+#include "lldb/ValueObject/ValueObject.h"
+
+#include <vector>
+
+namespace lldb_private {
+
+class ItaniumABIRuntime {
+public:
+  ItaniumABIRuntime(Process *process);
+
+  llvm::Expected<LanguageRuntime::VTableInfo>
+  GetVTableInfo(ValueObject &in_value, bool check_type);
+
+  bool GetDynamicTypeAndAddress(ValueObject &in_value,
+                                lldb::DynamicValueType use_dynamic,
+                                TypeAndOrName &class_type_or_name,
+                                Address &dynamic_address,
+                                Value::ValueType &value_type);
+
+  void AppendExceptionBreakpointFunctions(std::vector<const char *> &names,
+                                          bool catch_bp, bool throw_bp,
+                                          bool for_expressions);
+
+  void AppendExceptionBreakpointFilterModules(FileSpecList &list,
+                                              const Target &target);
+
+  lldb::ValueObjectSP GetExceptionObjectForThread(lldb::ThreadSP thread_sp);
+
+private:
+  TypeAndOrName GetTypeInfo(ValueObject &in_value,
+                            const LanguageRuntime::VTableInfo &vtable_info);
+
+  llvm::Error TypeHasVTable(CompilerType type);
+
+  TypeAndOrName GetDynamicTypeInfo(const lldb_private::Address &vtable_addr);
+
+  void SetDynamicTypeInfo(const lldb_private::Address &vtable_addr,
+                          const TypeAndOrName &type_info);
+
+  using DynamicTypeCache = std::map<Address, TypeAndOrName>;
+  using VTableInfoCache = std::map<Address, LanguageRuntime::VTableInfo>;
+
+  DynamicTypeCache m_dynamic_type_map;
+  VTableInfoCache m_vtable_info_map;
+  std::mutex m_mutex;
+
+  Process *m_process;
+};
+
+} // namespace lldb_private
+
+#endif
diff --git a/lldb/source/Plugins/REPL/Clang/CMakeLists.txt b/lldb/source/Plugins/REPL/Clang/CMakeLists.txt
index c020ab82ad292..3ecc37a391e19 100644
--- a/lldb/source/Plugins/REPL/Clang/CMakeLists.txt
+++ b/lldb/source/Plugins/REPL/Clang/CMakeLists.txt
@@ -10,6 +10,5 @@ add_lldb_library(lldbPluginClangREPL PLUGIN
     lldbSymbol
     lldbTarget
     lldbUtility
-    lldbPluginCPPRuntime
     lldbPluginTypeSystemClang
 )

>From b5f3ea1fe4bb2945adc10780255d1aee8e77685c Mon Sep 17 00:00:00 2001
From: Aiden Grossman <aidengrossman at google.com>
Date: Sat, 7 Mar 2026 12:44:09 -0800
Subject: [PATCH 19/33] [Bazel] Fixes 58efc42 (#185227)

Fixes 58efc42.
---
 .../lldb/source/Plugins/BUILD.bazel           | 27 +++----------------
 .../lldb/source/Plugins/plugin_config.bzl     |  2 +-
 2 files changed, 5 insertions(+), 24 deletions(-)

diff --git a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
index ca492d223d8e2..42729d83d7d7e 100644
--- a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
@@ -100,9 +100,13 @@ cc_library(
     hdrs = glob(["LanguageRuntime/CPlusPlus/*.h"]),
     includes = [".."],
     deps = [
+        ":PluginTypeSystemClangHeaders",
         "//clang:codegen",
         "//lldb:CoreHeaders",
+        "//lldb:DataFormattersHeaders",
+        "//lldb:ExpressionHeaders",
         "//lldb:Headers",
+        "//lldb:InterpreterHeaders",
         "//lldb:SymbolHeaders",
         "//lldb:TargetHeaders",
         "//lldb:Utility",
@@ -2338,29 +2342,6 @@ cc_library(
     ],
 )
 
-cc_library(
-    name = "PluginCXXItaniumABI",
-    srcs = glob(["LanguageRuntime/CPlusPlus/ItaniumABI/*.cpp"]),
-    hdrs = glob(["LanguageRuntime/CPlusPlus/ItaniumABI/*.h"]),
-    includes = [".."],
-    deps = [
-        ":PluginCPPRuntime",
-        ":PluginTypeSystemClang",
-        ":PluginTypeSystemClangHeaders",
-        "//lldb:Breakpoint",
-        "//lldb:Core",
-        "//lldb:DataFormattersHeaders",
-        "//lldb:ExpressionHeaders",
-        "//lldb:Headers",
-        "//lldb:InterpreterHeaders",
-        "//lldb:Symbol",
-        "//lldb:SymbolHeaders",
-        "//lldb:Target",
-        "//lldb:TargetHeaders",
-        "//lldb:Utility",
-    ],
-)
-
 cc_library(
     name = "PluginGNUstepObjCRuntime",
     srcs = glob(["LanguageRuntime/ObjC/GNUstepObjCRuntime/*.cpp"]),
diff --git a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/plugin_config.bzl b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/plugin_config.bzl
index 26b6a84e2138f..f7a20a248c89a 100644
--- a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/plugin_config.bzl
+++ b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/plugin_config.bzl
@@ -23,7 +23,7 @@ DEFAULT_PLUGINS = [
     "ArchitecturePPC64",
     "ClangREPL",
     "CPlusPlusLanguage",
-    "CXXItaniumABI",
+    "CPPRuntime",
     "DisassemblerLLVMC",
     "HighlighterClang",
     "HighlighterDefault",

>From ff11354911024ff443145e2a502e83f42066418a Mon Sep 17 00:00:00 2001
From: owenca <owenpiano at gmail.com>
Date: Sat, 7 Mar 2026 12:47:12 -0800
Subject: [PATCH 20/33] [clang-format][NFC] Use the newly added
 AllowLiteralDigitSeparator (#185165)

Use LangOptions::AllowLiteralDigitSeparator added in #184235 for the
IntegerLiteralSeparator option.
---
 clang/lib/Format/IntegerLiteralSeparatorFixer.cpp | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
index 5f40100c2b968..eea9fcc56af63 100644
--- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
+++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -44,6 +44,8 @@ static Base getBase(StringRef IntegerLiteral) {
 std::pair<tooling::Replacements, unsigned>
 IntegerLiteralSeparatorFixer::process(const Environment &Env,
                                       const FormatStyle &Style) {
+  const auto LangOpts = getFormattingLangOpts(Style);
+
   switch (Style.Language) {
   case FormatStyle::LK_CSharp:
   case FormatStyle::LK_Java:
@@ -53,11 +55,10 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
   case FormatStyle::LK_C:
   case FormatStyle::LK_Cpp:
   case FormatStyle::LK_ObjC:
-    if (Style.Standard >= FormatStyle::LS_Cpp14) {
-      Separator = '\'';
-      break;
-    }
-    [[fallthrough]];
+    if (!LangOpts.AllowLiteralDigitSeparator)
+      return {};
+    Separator = '\'';
+    break;
   default:
     return {};
   }
@@ -94,7 +95,6 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
   AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges());
 
   const auto ID = Env.getFileID();
-  const auto LangOpts = getFormattingLangOpts(Style);
   Lexer Lex(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts);
   Lex.SetCommentRetentionState(true);
 

>From d7ff97db15026860f1f9c95e8fbf9f81e7120be3 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Sat, 7 Mar 2026 17:23:11 -0500
Subject: [PATCH 21/33] [libc++] Support build failures when running LNT
 benchmarks (#185234)

It's rare but possible for the codebase not to build. When that happens,
we should carry on and still submit an empty LNT report for that order,
otherwise we'll get stuck thinking that order hasn't been benchmarked
yet.
---
 libcxx/utils/ci/lnt/run-benchmarks | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libcxx/utils/ci/lnt/run-benchmarks b/libcxx/utils/ci/lnt/run-benchmarks
index 40c58eaff54e7..256871b6a8576 100755
--- a/libcxx/utils/ci/lnt/run-benchmarks
+++ b/libcxx/utils/ci/lnt/run-benchmarks
@@ -127,11 +127,12 @@ def main(argv):
         run([build_dir / '.venv/bin/pip', 'install', 'llvm-lnt'])
 
         logging.info(f'Building libc++ at commit {args.benchmark_commit}')
-        run([args.git_repo / 'libcxx/utils/build-at-commit',
+        build_cmd = [args.git_repo / 'libcxx/utils/build-at-commit',
                         '--git-repo', args.git_repo,
                         '--install-dir', build_dir / 'install',
                         '--commit', args.benchmark_commit,
-                        '--', '-DCMAKE_BUILD_TYPE=RelWithDebInfo', f'-DCMAKE_CXX_COMPILER={args.compiler}'])
+                        '--', '-DCMAKE_BUILD_TYPE=RelWithDebInfo', f'-DCMAKE_CXX_COMPILER={args.compiler}']
+        run(build_cmd, enforce_success=False) # if the build fails, carry on: we'll fail later and submit empty LNT results
 
         logging.info(f'Running benchmarks from {args.test_suite_commit} against libc++ {args.benchmark_commit}')
         cmd = [args.git_repo / 'libcxx/utils/test-at-commit',

>From baac39c0d2a46a1ff6d810168bd6277008fbd0bf Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Sun, 8 Mar 2026 00:28:51 +0100
Subject: [PATCH 22/33] libclc: Add amdgpu wait_group_events (#185177)

---
 libclc/opencl/lib/amdgcn/SOURCES              |  1 +
 .../lib/amdgcn/async/wait_group_events.cl     | 23 +++++++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 libclc/opencl/lib/amdgcn/async/wait_group_events.cl

diff --git a/libclc/opencl/lib/amdgcn/SOURCES b/libclc/opencl/lib/amdgcn/SOURCES
index e52f54789bfab..c9d29fb2d1905 100644
--- a/libclc/opencl/lib/amdgcn/SOURCES
+++ b/libclc/opencl/lib/amdgcn/SOURCES
@@ -1,3 +1,4 @@
+async/wait_group_events.cl
 mem_fence/fence.cl
 subgroup/subgroup.cl
 synchronization/sub_group_barrier.cl
diff --git a/libclc/opencl/lib/amdgcn/async/wait_group_events.cl b/libclc/opencl/lib/amdgcn/async/wait_group_events.cl
new file mode 100644
index 0000000000000..fac33f73af1c4
--- /dev/null
+++ b/libclc/opencl/lib/amdgcn/async/wait_group_events.cl
@@ -0,0 +1,23 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <clc/opencl/opencl-base.h>
+
+_CLC_DEF _CLC_OVERLOAD void wait_group_events(int n, __private event_t *evs) {
+  (void)n;
+  (void)evs;
+  work_group_barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE,
+                     memory_scope_work_group);
+}
+
+_CLC_DEF _CLC_OVERLOAD void wait_group_events(int n, __generic event_t *evs) {
+  (void)n;
+  (void)evs;
+  work_group_barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE,
+                     memory_scope_work_group);
+}

>From a2231642b341c71ba184af2b3831502fded401da Mon Sep 17 00:00:00 2001
From: William Tran-Viet <wtranviet at proton.me>
Date: Sat, 7 Mar 2026 20:58:06 -0500
Subject: [PATCH 23/33] [libc++][NFC] Simplify `optional::iterator` (#183230)

- Rename `__optional_iterator` into `__optional_iterator_base` and make
it part of the `__optional_{meow}_base` inheritance chain. This allows
us to get rid of a sketchy-looking downcast and shorten some code.
---
 libcxx/include/__iterator/wrap_iter.h |  2 --
 libcxx/include/optional               | 43 ++++++++++++++-------------
 2 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/libcxx/include/__iterator/wrap_iter.h b/libcxx/include/__iterator/wrap_iter.h
index d20d7f3fc4c4c..c1b27b8242cd9 100644
--- a/libcxx/include/__iterator/wrap_iter.h
+++ b/libcxx/include/__iterator/wrap_iter.h
@@ -117,8 +117,6 @@ class __wrap_iter {
   friend class span;
   template <class _Tp, size_t _Size>
   friend struct array;
-  template <class _Tp, class>
-  friend struct __optional_iterator;
 
   _LIBCPP_HIDE_FROM_ABI friend _LIBCPP_CONSTEXPR bool
   operator==(const __wrap_iter& __x, const __wrap_iter& __y) _NOEXCEPT {
diff --git a/libcxx/include/optional b/libcxx/include/optional
index a928769d65c90..c4b52dab6ea66 100644
--- a/libcxx/include/optional
+++ b/libcxx/include/optional
@@ -706,17 +706,21 @@ inline constexpr bool __is_constructible_for_optional_initializer_list_v<_Tp&, _
 #    endif
 
 template <class _Tp, class = void>
-struct __optional_iterator {};
+struct __optional_iterator_base : __optional_move_assign_base<_Tp> {
+  using __optional_move_assign_base<_Tp>::__optional_move_assign_base;
+};
 
 #    if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPERIMENTAL_OPTIONAL_ITERATOR
 
 template <class _Tp>
-struct __optional_iterator<_Tp, enable_if_t<is_object_v<_Tp>>> {
+struct __optional_iterator_base<_Tp, enable_if_t<is_object_v<_Tp>>> : __optional_move_assign_base<_Tp> {
 private:
   using __pointer _LIBCPP_NODEBUG       = add_pointer_t<_Tp>;
   using __const_pointer _LIBCPP_NODEBUG = add_pointer_t<const _Tp>;
 
 public:
+  using __optional_move_assign_base<_Tp>::__optional_move_assign_base;
+
 #      ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_OPTIONAL
   using iterator       = __bounded_iter<__pointer>;
   using const_iterator = __bounded_iter<__const_pointer>;
@@ -727,41 +731,42 @@ public:
 
   // [optional.iterators], iterator support
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iterator begin() noexcept {
-    auto& __derived_self = static_cast<optional<_Tp>&>(*this);
-    auto* __ptr          = std::addressof(__derived_self.__get());
+    auto* __ptr = std::addressof(this->__get());
 
 #      ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_OPTIONAL
-    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (__derived_self.has_value() ? 1 : 0));
+    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (this->has_value() ? 1 : 0));
 #      else
     return std::__make_capacity_aware_iterator<__pointer, optional<_Tp>, 1>(__ptr);
 #      endif
   }
 
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const_iterator begin() const noexcept {
-    auto& __derived_self = static_cast<const optional<_Tp>&>(*this);
-    auto* __ptr          = std::addressof(__derived_self.__get());
+    auto* __ptr = std::addressof(this->__get());
 
 #      ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_OPTIONAL
-    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (__derived_self.has_value() ? 1 : 0));
+    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (this->has_value() ? 1 : 0));
 #      else
     return std::__make_capacity_aware_iterator<__const_pointer, optional<_Tp>, 1>(__ptr);
 #      endif
   }
 
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iterator end() noexcept {
-    return begin() + (static_cast<optional<_Tp>&>(*this).has_value() ? 1 : 0);
+    return begin() + (this->has_value() ? 1 : 0);
   }
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const_iterator end() const noexcept {
-    return begin() + (static_cast<const optional<_Tp>&>(*this).has_value() ? 1 : 0);
+    return begin() + (this->has_value() ? 1 : 0);
   }
 };
 
 template <class _Tp>
-struct __optional_iterator<_Tp&, enable_if_t<is_object_v<_Tp> && !__is_unbounded_array_v<_Tp> >> {
+struct __optional_iterator_base<_Tp&, enable_if_t<is_object_v<_Tp> && !__is_unbounded_array_v<_Tp> >>
+    : __optional_move_assign_base<_Tp&> {
 private:
   using __pointer _LIBCPP_NODEBUG = add_pointer_t<_Tp>;
 
 public:
+  using __optional_move_assign_base<_Tp&>::__optional_move_assign_base;
+
 #      ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_OPTIONAL
   using iterator = __bounded_iter<__pointer>;
 #      else
@@ -771,18 +776,17 @@ public:
   // [optional.ref.iterators], iterator support
 
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto begin() const noexcept {
-    auto& __derived_self = static_cast<const optional<_Tp&>&>(*this);
-    auto* __ptr          = __derived_self.has_value() ? std::addressof(__derived_self.__get()) : nullptr;
+    auto* __ptr = this->has_value() ? std::addressof(this->__get()) : nullptr;
 
 #      ifdef _LIBCPP_ABI_BOUNDED_ITERATORS_IN_OPTIONAL
-    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (__derived_self.has_value() ? 1 : 0));
+    return std::__make_bounded_iter(__ptr, __ptr, __ptr + (this->has_value() ? 1 : 0));
 #      else
-    return std::__make_capacity_aware_iterator<__pointer, optional<_Tp&>, 1>(__pointer(__ptr));
+    return std::__make_capacity_aware_iterator<__pointer, optional<_Tp&>, 1>(__ptr);
 #      endif
   }
 
   [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto end() const noexcept {
-    return begin() + (static_cast<const optional<_Tp&>&>(*this).has_value() ? 1 : 0);
+    return begin() + (this->has_value() ? 1 : 0);
   }
 };
 
@@ -790,11 +794,10 @@ public:
 
 template <class _Tp>
 class _LIBCPP_DECLSPEC_EMPTY_BASES optional
-    : private __optional_move_assign_base<_Tp>,
+    : public __optional_iterator_base<_Tp>,
       private __optional_sfinae_ctor_base_t<_Tp>,
-      private __optional_sfinae_assign_base_t<_Tp>,
-      public __optional_iterator<_Tp> {
-  using __base _LIBCPP_NODEBUG = __optional_move_assign_base<_Tp>;
+      private __optional_sfinae_assign_base_t<_Tp> {
+  using __base _LIBCPP_NODEBUG = __optional_iterator_base<_Tp>;
 
 public:
   using value_type = __libcpp_remove_reference_t<_Tp>;

>From b883091badd812063adff715f27e414af74b371c Mon Sep 17 00:00:00 2001
From: Lang Hames <lhames at gmail.com>
Date: Sun, 8 Mar 2026 14:28:04 +1100
Subject: [PATCH 24/33] [ORC] Further simplify ContainerElementsMap::hoistDeps.
 NFCI. (#185242)

At the one call-site where the result of hoistDeps is used,
self-dependencies will already have been eliminated. That means we can
use the "deps changed" property that we're already computing rather than
tracking "deps graph changed" separately.
---
 llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
index 2ffd6bd6cbffd..28780b0433bda 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
@@ -280,9 +280,7 @@ template <typename ContainerIdT, typename ElementIdT> class WaitingOnGraph {
     /// Returns true if SuperNodeDeps was changed.
     bool hoistDeps(SuperNodeDepsMap &SuperNodeDeps,
                    ElemToSuperNodeMap &ElemToSN) {
-      bool SuperNodeDepsChanged = false;
-
-      Deps.visit([&](ContainerId &Container, ElementSet &Elements) {
+      return Deps.visit([&](ContainerId &Container, ElementSet &Elements) {
         auto I = ElemToSN.find(Container);
         if (I == ElemToSN.end())
           return false;
@@ -294,15 +292,11 @@ template <typename ContainerIdT, typename ElementIdT> class WaitingOnGraph {
             return false;
 
           auto *DefSN = J->second;
-          if (DefSN != this) {
-            SuperNodeDepsChanged = true;
+          if (DefSN != this)
             SuperNodeDeps[DefSN].insert(this);
-          }
           return true;
         });
       });
-
-      return SuperNodeDepsChanged;
     }
   };
 

>From 088f740035f0ab13d122b06fa723ac4ac07a5142 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Sat, 7 Mar 2026 22:34:17 -0600
Subject: [PATCH 25/33] [libclc] Add custom CMake language handling for OpenCL
 files (#185243)

Summary:
The current handling of `libclc` is to use custom clang jobs to compile
source files. This is the way we used to compile GPU libraries, but we
have largely moved away from this. The eventual goal is to simply be
able to use `clang` like a normal project. One barrier to this is the
lack of language support for OpenCL in CMake.

This PR uses CMake's custom language support to define a minimal OpenCL
language, largely just a wrapper around `clang -x cl`. By itself this does
nothing beyond enabling the support. Future PRs will need to untangle the
mess of dependencies.

The 'link+opt' steps that we now do should be able to simply be done as
a custom `llvm-link` and `opt` job on the library, which isn't ideal but
it is still better than the current state.
---
 libclc/CMakeLists.txt                         |  2 ++
 .../cmake/modules/CMakeCLCCompiler.cmake.in   | 13 +++++++
 libclc/cmake/modules/CMakeCLCCompilerTest.cl  |  1 +
 .../cmake/modules/CMakeCLCInformation.cmake   | 26 ++++++++++++++
 .../modules/CMakeDetermineCLCCompiler.cmake   | 18 ++++++++++
 .../cmake/modules/CMakeTestCLCCompiler.cmake  | 34 +++++++++++++++++++
 6 files changed, 94 insertions(+)
 create mode 100644 libclc/cmake/modules/CMakeCLCCompiler.cmake.in
 create mode 100644 libclc/cmake/modules/CMakeCLCCompilerTest.cl
 create mode 100644 libclc/cmake/modules/CMakeCLCInformation.cmake
 create mode 100644 libclc/cmake/modules/CMakeDetermineCLCCompiler.cmake
 create mode 100644 libclc/cmake/modules/CMakeTestCLCCompiler.cmake

diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt
index 1aa67b0745781..6448bb8b3df5b 100644
--- a/libclc/CMakeLists.txt
+++ b/libclc/CMakeLists.txt
@@ -10,6 +10,8 @@ set(CMAKE_CXX_STANDARD 17)
 # Add path for custom modules
 list( INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules" )
 
+enable_language( CLC )
+
 set( LIBCLC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR} )
 set( LIBCLC_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} )
 set( LIBCLC_OBJFILE_DIR ${LIBCLC_BINARY_DIR}/obj.libclc.dir )
diff --git a/libclc/cmake/modules/CMakeCLCCompiler.cmake.in b/libclc/cmake/modules/CMakeCLCCompiler.cmake.in
new file mode 100644
index 0000000000000..ac7856ef76c62
--- /dev/null
+++ b/libclc/cmake/modules/CMakeCLCCompiler.cmake.in
@@ -0,0 +1,13 @@
+set(CMAKE_CLC_COMPILER "@CMAKE_CLC_COMPILER@")
+set(CMAKE_CLC_COMPILER_ID "@CMAKE_CLC_COMPILER_ID@")
+
+set(CMAKE_CLC_COMPILER_LOADED 1)
+set(CMAKE_CLC_COMPILER_WORKS @CMAKE_CLC_COMPILER_WORKS@)
+
+set(CMAKE_CLC_COMPILER_ENV_VAR "CLC")
+set(CMAKE_CLC_COMPILER_ID_RUN 1)
+
+set(CMAKE_CLC_SOURCE_FILE_EXTENSIONS cl)
+
+set(CMAKE_CLC_LINKER_PREFERENCE 0)
+set(CMAKE_CLC_LINKER_PREFERENCE_PROPAGATES 0)
diff --git a/libclc/cmake/modules/CMakeCLCCompilerTest.cl b/libclc/cmake/modules/CMakeCLCCompilerTest.cl
new file mode 100644
index 0000000000000..e84fd83ba7edb
--- /dev/null
+++ b/libclc/cmake/modules/CMakeCLCCompilerTest.cl
@@ -0,0 +1 @@
+kernel void __test_clc(global int *out) { *out = 0; }
diff --git a/libclc/cmake/modules/CMakeCLCInformation.cmake b/libclc/cmake/modules/CMakeCLCInformation.cmake
new file mode 100644
index 0000000000000..f92592221f034
--- /dev/null
+++ b/libclc/cmake/modules/CMakeCLCInformation.cmake
@@ -0,0 +1,26 @@
+set(CMAKE_CLC_OUTPUT_EXTENSION .o)
+set(CMAKE_INCLUDE_FLAG_CLC "-I")
+
+set(CMAKE_CLC_DEPFILE_FORMAT gcc)
+set(CMAKE_DEPFILE_FLAGS_CLC "-MD -MT <DEP_TARGET> -MF <DEP_FILE>")
+
+cmake_initialize_per_config_variable(CMAKE_CLC_FLAGS "Flags used by the CLC compiler")
+
+if(NOT CMAKE_CLC_COMPILE_OBJECT)
+  set(CMAKE_CLC_COMPILE_OBJECT
+    "<CMAKE_CLC_COMPILER> -x cl <DEFINES> <INCLUDES> <FLAGS> -c -o <OBJECT> <SOURCE>")
+endif()
+
+if(NOT DEFINED CMAKE_CLC_ARCHIVE_CREATE)
+  set(CMAKE_CLC_ARCHIVE_CREATE "<CMAKE_AR> qc <TARGET> <LINK_FLAGS> <OBJECTS>")
+endif()
+if(NOT DEFINED CMAKE_CLC_ARCHIVE_APPEND)
+  set(CMAKE_CLC_ARCHIVE_APPEND "<CMAKE_AR> q <TARGET> <LINK_FLAGS> <OBJECTS>")
+endif()
+if(NOT DEFINED CMAKE_CLC_ARCHIVE_FINISH)
+  set(CMAKE_CLC_ARCHIVE_FINISH "<CMAKE_RANLIB> <TARGET>")
+endif()
+
+set(CMAKE_CLC_USE_LINKER_INFORMATION FALSE)
+
+set(CMAKE_CLC_INFORMATION_LOADED 1)
diff --git a/libclc/cmake/modules/CMakeDetermineCLCCompiler.cmake b/libclc/cmake/modules/CMakeDetermineCLCCompiler.cmake
new file mode 100644
index 0000000000000..2138ad85d0059
--- /dev/null
+++ b/libclc/cmake/modules/CMakeDetermineCLCCompiler.cmake
@@ -0,0 +1,18 @@
+if(NOT CMAKE_CLC_COMPILER)
+  if(NOT CMAKE_C_COMPILER_ID MATCHES "Clang")
+    message(FATAL_ERROR
+      "The CLC language requires the C compiler (CMAKE_C_COMPILER) to be "
+      "Clang, but CMAKE_C_COMPILER_ID is '${CMAKE_C_COMPILER_ID}'.")
+  endif()
+  set(CMAKE_CLC_COMPILER "${CMAKE_C_COMPILER}" CACHE FILEPATH "CLC compiler")
+endif()
+
+mark_as_advanced(CMAKE_CLC_COMPILER)
+
+set(CMAKE_CLC_COMPILER_ID "Clang")
+set(CMAKE_CLC_COMPILER_ID_RUN TRUE)
+
+configure_file(${CMAKE_CURRENT_LIST_DIR}/CMakeCLCCompiler.cmake.in
+  ${CMAKE_PLATFORM_INFO_DIR}/CMakeCLCCompiler.cmake @ONLY)
+
+set(CMAKE_CLC_COMPILER_ENV_VAR "CLC")
diff --git a/libclc/cmake/modules/CMakeTestCLCCompiler.cmake b/libclc/cmake/modules/CMakeTestCLCCompiler.cmake
new file mode 100644
index 0000000000000..d18a818f93bfb
--- /dev/null
+++ b/libclc/cmake/modules/CMakeTestCLCCompiler.cmake
@@ -0,0 +1,34 @@
+configure_file(${CMAKE_CURRENT_LIST_DIR}/CMakeCLCCompiler.cmake.in
+  ${CMAKE_PLATFORM_INFO_DIR}/CMakeCLCCompiler.cmake @ONLY)
+include(${CMAKE_PLATFORM_INFO_DIR}/CMakeCLCCompiler.cmake)
+
+if(CMAKE_CLC_COMPILER_FORCED)
+  set(CMAKE_CLC_COMPILER_WORKS TRUE)
+  return()
+endif()
+
+set(_test_file "${CMAKE_CURRENT_LIST_DIR}/CMakeCLCCompilerTest.cl")
+set(_test_dir "${CMAKE_PLATFORM_INFO_DIR}/CMakeTmp")
+set(_test_out "${_test_dir}/test_clc.o")
+file(MAKE_DIRECTORY "${_test_dir}")
+
+message(STATUS "Check for working CLC compiler: ${CMAKE_CLC_COMPILER}")
+
+execute_process(
+  COMMAND "${CMAKE_CLC_COMPILER}" -x cl -c -flto
+          -o "${_test_out}" "${_test_file}"
+  RESULT_VARIABLE _clc_result
+  ERROR_VARIABLE _clc_error
+)
+
+if(_clc_result EQUAL 0)
+  set(CMAKE_CLC_COMPILER_WORKS TRUE)
+  message(STATUS "Check for working CLC compiler: ${CMAKE_CLC_COMPILER} - works")
+  file(REMOVE "${_test_out}")
+else()
+  message(FATAL_ERROR
+    "The CLC compiler\n"
+    "  ${CMAKE_CLC_COMPILER}\n"
+    "is not able to compile a simple OpenCL test program.\n"
+    "Output:\n${_clc_error}")
+endif()

>From 0cad9b7585d9c5dbb59de0b54b0a6f7b99da1c69 Mon Sep 17 00:00:00 2001
From: Lucas Chollet <lucas.chollet at free.fr>
Date: Sun, 8 Mar 2026 05:50:46 +0100
Subject: [PATCH 26/33] [llvm] Prefer the unknown vendor for SerenityOS triples
 (#185026)

No functional change. This patch updates the target-triple unit tests to
use the ones that we care about.
---
 llvm/unittests/TargetParser/TripleTest.cpp | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/llvm/unittests/TargetParser/TripleTest.cpp b/llvm/unittests/TargetParser/TripleTest.cpp
index 30ca4d0dcebd3..5f83fdd0ba000 100644
--- a/llvm/unittests/TargetParser/TripleTest.cpp
+++ b/llvm/unittests/TargetParser/TripleTest.cpp
@@ -1386,15 +1386,21 @@ TEST(TripleTest, ParsedIDs) {
   EXPECT_EQ(Triple::LiteOS, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 
-  T = Triple("x86_64-pc-serenity");
+  T = Triple("x86_64-unknown-serenity");
   EXPECT_EQ(Triple::x86_64, T.getArch());
-  EXPECT_EQ(Triple::PC, T.getVendor());
+  EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
   EXPECT_EQ(Triple::Serenity, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 
-  T = Triple("aarch64-pc-serenity");
+  T = Triple("aarch64-unknown-serenity");
   EXPECT_EQ(Triple::aarch64, T.getArch());
-  EXPECT_EQ(Triple::PC, T.getVendor());
+  EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
+  EXPECT_EQ(Triple::Serenity, T.getOS());
+  EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
+
+  T = Triple("riscv64-unknown-serenity");
+  EXPECT_EQ(Triple::riscv64, T.getArch());
+  EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
   EXPECT_EQ(Triple::Serenity, T.getOS());
   EXPECT_EQ(Triple::UnknownEnvironment, T.getEnvironment());
 

>From 3e0d6c81f22edaa8f13a54a1094ebacff1dbc7a2 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Sun, 8 Mar 2026 07:17:06 +0100
Subject: [PATCH 27/33] [clang][bytecode] Check for errors in CXXDeleteExprs
 (#185189)

e.g. `getOperatorDelete()` may return null for those.
---
 clang/lib/AST/ByteCode/Compiler.cpp    | 6 +++---
 clang/test/AST/ByteCode/new-delete.cpp | 9 +++++++++
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 93ad8eb26f29e..6d8b84a83a4ad 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -4136,15 +4136,15 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) {
 
 template <class Emitter>
 bool Compiler<Emitter>::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
-  const Expr *Arg = E->getArgument();
-
+  if (E->containsErrors())
+    return false;
   const FunctionDecl *OperatorDelete = E->getOperatorDelete();
 
   if (!OperatorDelete->isUsableAsGlobalAllocationFunctionInConstantEvaluation())
     return this->emitInvalidNewDeleteExpr(E, E);
 
   // Arg must be an lvalue.
-  if (!this->visit(Arg))
+  if (!this->visit(E->getArgument()))
     return false;
 
   return this->emitFree(E->isArrayForm(), E->isGlobalDelete(), E);
diff --git a/clang/test/AST/ByteCode/new-delete.cpp b/clang/test/AST/ByteCode/new-delete.cpp
index 4e2af79a24811..bf892e79a67ac 100644
--- a/clang/test/AST/ByteCode/new-delete.cpp
+++ b/clang/test/AST/ByteCode/new-delete.cpp
@@ -1135,6 +1135,15 @@ namespace NonLiteralType {
   }
 }
 
+namespace BrokenDelete {
+  constexpr void foo() {
+    F *f = /* missing */; // both-error {{unknown type name 'F'}} \
+                          // both-error {{expected expression}}
+
+    delete f;
+  }
+}
+
 #else
 /// Make sure we reject this prior to C++20
 constexpr int a() { // both-error {{never produces a constant expression}}

>From 3fb55b6872c1534fb965bd758d3a6f4d5abf495d Mon Sep 17 00:00:00 2001
From: Alexey Samsonov <vonosmas at gmail.com>
Date: Sat, 7 Mar 2026 22:22:36 -0800
Subject: [PATCH 28/33] [libc] Add <sys/un.h> header. (#182622)

`sockaddr_un` structure is supposed to be provided by the `<sys/un.h>`
header. Add this header to llvm-libc, and move the declaration of
`sockaddr_un` there from `<sys/socket.h>`. See
https://man7.org/linux/man-pages/man0/sys_un.h.0p.html

Add proxy headers for the `<sys/socket.h>` macro (like `AF_UNIX`) and
for the `struct sockaddr_un` so that the tests can be more hermetic and
avoid system header inclusion.
---
 libc/config/linux/aarch64/headers.txt         |  1 +
 libc/config/linux/x86_64/headers.txt          |  1 +
 libc/hdr/CMakeLists.txt                       |  9 ++++++++
 libc/hdr/sys_socket_macros.h                  | 22 +++++++++++++++++++
 libc/hdr/types/CMakeLists.txt                 |  9 ++++++++
 libc/hdr/types/struct_sockaddr_un.h           | 21 ++++++++++++++++++
 libc/include/CMakeLists.txt                   | 10 ++++++++-
 libc/include/sys/socket.yaml                  |  1 -
 libc/include/sys/un.yaml                      |  5 +++++
 libc/test/src/sys/socket/linux/CMakeLists.txt |  2 ++
 libc/test/src/sys/socket/linux/bind_test.cpp  |  4 ++--
 11 files changed, 81 insertions(+), 4 deletions(-)
 create mode 100644 libc/hdr/sys_socket_macros.h
 create mode 100644 libc/hdr/types/struct_sockaddr_un.h
 create mode 100644 libc/include/sys/un.yaml

diff --git a/libc/config/linux/aarch64/headers.txt b/libc/config/linux/aarch64/headers.txt
index 0573851fbc20b..1c7df25ad930f 100644
--- a/libc/config/linux/aarch64/headers.txt
+++ b/libc/config/linux/aarch64/headers.txt
@@ -49,6 +49,7 @@ set(TARGET_PUBLIC_HEADERS
     libc.include.sys_syscall
     libc.include.sys_time
     libc.include.sys_types
+    libc.include.sys_un
     libc.include.sys_utsname
     libc.include.sys_wait
     libc.include.sysexits
diff --git a/libc/config/linux/x86_64/headers.txt b/libc/config/linux/x86_64/headers.txt
index 0669cc30aeadc..543d151465663 100644
--- a/libc/config/linux/x86_64/headers.txt
+++ b/libc/config/linux/x86_64/headers.txt
@@ -51,6 +51,7 @@ set(TARGET_PUBLIC_HEADERS
     libc.include.sys_syscall
     libc.include.sys_time
     libc.include.sys_types
+    libc.include.sys_un
     libc.include.sys_utsname
     libc.include.sys_wait
     libc.include.sysexits
diff --git a/libc/hdr/CMakeLists.txt b/libc/hdr/CMakeLists.txt
index cb8ade0604ba7..20d210c0c443f 100644
--- a/libc/hdr/CMakeLists.txt
+++ b/libc/hdr/CMakeLists.txt
@@ -149,6 +149,15 @@ add_proxy_header_library(
     libc.include.llvm-libc-macros.sys_ioctl_macros
 )
 
+add_proxy_header_library(
+  sys_socket_macros
+  HDRS
+    sys_socket_macros.h
+  FULL_BUILD_DEPENDS
+    libc.include.sys_socket
+    libc.include.llvm-libc-macros.sys_socket_macros
+)
+
 add_proxy_header_library(
   sys_stat_macros
   HDRS
diff --git a/libc/hdr/sys_socket_macros.h b/libc/hdr/sys_socket_macros.h
new file mode 100644
index 0000000000000..5d8279b25b786
--- /dev/null
+++ b/libc/hdr/sys_socket_macros.h
@@ -0,0 +1,22 @@
+//===-- Definition of macros from sys/socket.h ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_HDR_SYS_SOCKET_MACROS_H
+#define LLVM_LIBC_HDR_SYS_SOCKET_MACROS_H
+
+#ifdef LIBC_FULL_BUILD
+
+#include "include/llvm-libc-macros/sys-socket-macros.h"
+
+#else // Overlay mode
+
+#include <sys/socket.h>
+
+#endif // LIBC_FULL_BUILD
+
+#endif // LLVM_LIBC_HDR_SYS_SOCKET_MACROS_H
diff --git a/libc/hdr/types/CMakeLists.txt b/libc/hdr/types/CMakeLists.txt
index 03e7066f83a03..1832f906d774c 100644
--- a/libc/hdr/types/CMakeLists.txt
+++ b/libc/hdr/types/CMakeLists.txt
@@ -371,6 +371,15 @@ add_proxy_header_library(
     libc.include.sys_socket
 )
 
+add_proxy_header_library(
+  struct_sockaddr_un
+  HDRS
+    struct_sockaddr_un.h
+  FULL_BUILD_DEPENDS
+    libc.include.llvm-libc-types.struct_sockaddr_un
+    libc.include.sys_socket
+)
+
 add_proxy_header_library(
   socklen_t
   HDRS
diff --git a/libc/hdr/types/struct_sockaddr_un.h b/libc/hdr/types/struct_sockaddr_un.h
new file mode 100644
index 0000000000000..8ac75598da282
--- /dev/null
+++ b/libc/hdr/types/struct_sockaddr_un.h
@@ -0,0 +1,21 @@
+//===-- Proxy for struct sockaddr_un  -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_HDR_TYPES_STRUCT_SOCKADDR_UN_H
+#define LLVM_LIBC_HDR_TYPES_STRUCT_SOCKADDR_UN_H
+
+#ifdef LIBC_FULL_BUILD
+
+#include "include/llvm-libc-types/struct_sockaddr_un.h"
+
+#else
+
+#include <sys/un.h>
+
+#endif // LIBC_FULL_BUILD
+
+#endif // LLVM_LIBC_HDR_TYPES_STRUCT_SOCKADDR_UN_H
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 5ffee68b5feb3..94c663c7d3917 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -680,7 +680,6 @@ add_header_macro(
     .llvm-libc-types.struct_iovec
     .llvm-libc-types.struct_msghdr
     .llvm-libc-types.struct_sockaddr
-    .llvm-libc-types.struct_sockaddr_un
 )
 
 add_header_macro(
@@ -759,6 +758,15 @@ add_header_macro(
     .llvm-libc-types.ssize_t
 )
 
+add_header_macro(
+  sys_un
+  ../libc/include/sys/un.yaml
+  sys/un.h
+  DEPENDS
+    .llvm_libc_common_h
+    .llvm-libc-types.struct_sockaddr_un
+)
+
 add_header_macro(
   sys_wait
   ../libc/include/sys/wait.yaml
diff --git a/libc/include/sys/socket.yaml b/libc/include/sys/socket.yaml
index 00d5de6af8a80..945e730413adb 100644
--- a/libc/include/sys/socket.yaml
+++ b/libc/include/sys/socket.yaml
@@ -2,7 +2,6 @@ header: sys/socket.h
 header_template: socket.h.def
 macros: []
 types:
-  - type_name: struct_sockaddr_un
   - type_name: struct_sockaddr
   - type_name: socklen_t
   - type_name: sa_family_t
diff --git a/libc/include/sys/un.yaml b/libc/include/sys/un.yaml
new file mode 100644
index 0000000000000..154294ff79cf5
--- /dev/null
+++ b/libc/include/sys/un.yaml
@@ -0,0 +1,5 @@
+header: sys/un.h
+standards:
+  - posix
+types:
+  - type_name: struct_sockaddr_un
diff --git a/libc/test/src/sys/socket/linux/CMakeLists.txt b/libc/test/src/sys/socket/linux/CMakeLists.txt
index de67040122138..1ff814029b330 100644
--- a/libc/test/src/sys/socket/linux/CMakeLists.txt
+++ b/libc/test/src/sys/socket/linux/CMakeLists.txt
@@ -23,6 +23,8 @@ add_libc_unittest(
     bind_test.cpp
   DEPENDS
     libc.include.sys_socket
+    libc.hdr.sys_socket_macros
+    libc.hdr.types.struct_sockaddr_un
     libc.src.errno.errno
     libc.src.sys.socket.socket
     libc.src.sys.socket.bind
diff --git a/libc/test/src/sys/socket/linux/bind_test.cpp b/libc/test/src/sys/socket/linux/bind_test.cpp
index 60cd7c0df10f2..03b8c18cfee83 100644
--- a/libc/test/src/sys/socket/linux/bind_test.cpp
+++ b/libc/test/src/sys/socket/linux/bind_test.cpp
@@ -6,6 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "hdr/sys_socket_macros.h"
+#include "hdr/types/struct_sockaddr_un.h"
 #include "src/sys/socket/bind.h"
 #include "src/sys/socket/socket.h"
 
@@ -16,8 +18,6 @@
 #include "test/UnitTest/ErrnoSetterMatcher.h"
 #include "test/UnitTest/Test.h"
 
-#include <sys/socket.h> // For AF_UNIX and SOCK_DGRAM
-
 using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds;
 using LlvmLibcBindTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest;
 

>From 4b9150a7a8cd43cc69e21e1308dcc5b0bb8d6d2e Mon Sep 17 00:00:00 2001
From: Younan Zhang <zyn7109 at gmail.com>
Date: Sun, 8 Mar 2026 14:46:45 +0800
Subject: [PATCH 29/33] [Clang] Fix a concept cache bug on DependentNameType
 (#184771)

The default RAV implementation refuses to traverse NNS's typeloc
recursively, and template parameters inside DependentNameType are
overlooked inadvertently.

No release note, as is customary for regression patches.

Fixes https://github.com/llvm/llvm-project/issues/184562
---
 clang/lib/Sema/SemaConcept.cpp                |  5 +++
 .../instantiate-requires-expr.cpp             | 31 +++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index b4462c944a4ce..a3dba77284afa 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -386,6 +386,11 @@ class HashParameterMapping : public RecursiveASTVisitor<HashParameterMapping> {
     return TraverseType(TL.getType().getCanonicalType(), TraverseQualifier);
   }
 
+  bool TraverseDependentNameType(const DependentNameType *T,
+                                 bool /*TraverseQualifier*/) {
+    return TraverseNestedNameSpecifier(T->getQualifier());
+  }
+
   bool TraverseTagType(const TagType *T, bool TraverseQualifier) {
     // T's parent can be dependent while T doesn't have any template arguments.
     // We should have already traversed its qualifier.
diff --git a/clang/test/SemaTemplate/instantiate-requires-expr.cpp b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
index 32ad5374f46a7..0b73a9176da8b 100644
--- a/clang/test/SemaTemplate/instantiate-requires-expr.cpp
+++ b/clang/test/SemaTemplate/instantiate-requires-expr.cpp
@@ -278,3 +278,34 @@ namespace sugared_instantiation {
   static_assert(requires { { f() } -> C; });
   static_assert(requires { { f() } -> D; });
 } // namespace sugared_instantiation
+
+namespace GH184562 {
+
+template <typename T>
+struct tid {
+  using type = T;
+};
+template <typename T>
+using tid_t = tid<T>::type;
+
+template <class T, int N = tid_t<T>::value>
+concept req = N < 3 || requires { typename T::template as_two<3>; };
+
+struct two {
+  static constexpr int value = 2;
+};
+
+struct three {
+  static constexpr int value = 3;
+  template <int>
+  using as_two = two;
+};
+
+template <req T>
+struct test {
+  using type = test<typename T::template as_two<1>>;
+};
+
+test<three> x;
+
+}

>From 0b2aaac04a04cd471e2aa635d3462d8c22aa5a04 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Sun, 8 Mar 2026 08:15:38 +0100
Subject: [PATCH 30/33] libclc: Remove target definitions of opencl workitem
 functions (#185206)

These were just calling the __clc implementation, so move it
to generic.
---
 libclc/opencl/lib/amdgcn/SOURCES                    |  4 ----
 libclc/opencl/lib/generic/SOURCES                   |  4 ++++
 .../workitem/get_global_offset.cl                   |  0
 .../{amdgcn => generic}/workitem/get_group_id.cl    |  0
 .../{amdgcn => generic}/workitem/get_local_id.cl    |  0
 .../{amdgcn => generic}/workitem/get_work_dim.cl    |  0
 libclc/opencl/lib/ptx-nvidiacl/SOURCES              |  9 ---------
 .../lib/ptx-nvidiacl/workitem/get_global_id.cl      | 13 -------------
 .../lib/ptx-nvidiacl/workitem/get_group_id.cl       | 13 -------------
 .../lib/ptx-nvidiacl/workitem/get_local_id.cl       | 13 -------------
 .../ptx-nvidiacl/workitem/get_local_linear_id.cl    | 13 -------------
 .../ptx-nvidiacl/workitem/get_max_sub_group_size.cl | 13 -------------
 .../lib/ptx-nvidiacl/workitem/get_num_sub_groups.cl | 13 -------------
 .../lib/ptx-nvidiacl/workitem/get_sub_group_id.cl   | 13 -------------
 .../ptx-nvidiacl/workitem/get_sub_group_local_id.cl | 13 -------------
 .../lib/ptx-nvidiacl/workitem/get_sub_group_size.cl | 13 -------------
 16 files changed, 4 insertions(+), 130 deletions(-)
 rename libclc/opencl/lib/{amdgcn => generic}/workitem/get_global_offset.cl (100%)
 rename libclc/opencl/lib/{amdgcn => generic}/workitem/get_group_id.cl (100%)
 rename libclc/opencl/lib/{amdgcn => generic}/workitem/get_local_id.cl (100%)
 rename libclc/opencl/lib/{amdgcn => generic}/workitem/get_work_dim.cl (100%)
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_global_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_group_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_linear_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_max_sub_group_size.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_num_sub_groups.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_local_id.cl
 delete mode 100644 libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_size.cl

diff --git a/libclc/opencl/lib/amdgcn/SOURCES b/libclc/opencl/lib/amdgcn/SOURCES
index c9d29fb2d1905..4153d964e118d 100644
--- a/libclc/opencl/lib/amdgcn/SOURCES
+++ b/libclc/opencl/lib/amdgcn/SOURCES
@@ -2,7 +2,3 @@ async/wait_group_events.cl
 mem_fence/fence.cl
 subgroup/subgroup.cl
 synchronization/sub_group_barrier.cl
-workitem/get_global_offset.cl
-workitem/get_group_id.cl
-workitem/get_local_id.cl
-workitem/get_work_dim.cl
diff --git a/libclc/opencl/lib/generic/SOURCES b/libclc/opencl/lib/generic/SOURCES
index 18dd6fd3a10c2..398e326ffe482 100644
--- a/libclc/opencl/lib/generic/SOURCES
+++ b/libclc/opencl/lib/generic/SOURCES
@@ -206,6 +206,10 @@ workitem/get_enqueued_local_size.cl
 workitem/get_global_id.cl
 workitem/get_global_linear_id.cl
 workitem/get_global_size.cl
+workitem/get_local_id.cl
 workitem/get_local_linear_id.cl
 workitem/get_local_size.cl
 workitem/get_num_groups.cl
+workitem/get_global_offset.cl
+workitem/get_group_id.cl
+workitem/get_work_dim.cl
diff --git a/libclc/opencl/lib/amdgcn/workitem/get_global_offset.cl b/libclc/opencl/lib/generic/workitem/get_global_offset.cl
similarity index 100%
rename from libclc/opencl/lib/amdgcn/workitem/get_global_offset.cl
rename to libclc/opencl/lib/generic/workitem/get_global_offset.cl
diff --git a/libclc/opencl/lib/amdgcn/workitem/get_group_id.cl b/libclc/opencl/lib/generic/workitem/get_group_id.cl
similarity index 100%
rename from libclc/opencl/lib/amdgcn/workitem/get_group_id.cl
rename to libclc/opencl/lib/generic/workitem/get_group_id.cl
diff --git a/libclc/opencl/lib/amdgcn/workitem/get_local_id.cl b/libclc/opencl/lib/generic/workitem/get_local_id.cl
similarity index 100%
rename from libclc/opencl/lib/amdgcn/workitem/get_local_id.cl
rename to libclc/opencl/lib/generic/workitem/get_local_id.cl
diff --git a/libclc/opencl/lib/amdgcn/workitem/get_work_dim.cl b/libclc/opencl/lib/generic/workitem/get_work_dim.cl
similarity index 100%
rename from libclc/opencl/lib/amdgcn/workitem/get_work_dim.cl
rename to libclc/opencl/lib/generic/workitem/get_work_dim.cl
diff --git a/libclc/opencl/lib/ptx-nvidiacl/SOURCES b/libclc/opencl/lib/ptx-nvidiacl/SOURCES
index 3ece564c9760e..f20917346a3bc 100644
--- a/libclc/opencl/lib/ptx-nvidiacl/SOURCES
+++ b/libclc/opencl/lib/ptx-nvidiacl/SOURCES
@@ -1,10 +1 @@
 mem_fence/fence.cl
-workitem/get_global_id.cl
-workitem/get_group_id.cl
-workitem/get_local_id.cl
-workitem/get_local_linear_id.cl
-workitem/get_max_sub_group_size.cl
-workitem/get_num_sub_groups.cl
-workitem/get_sub_group_id.cl
-workitem/get_sub_group_local_id.cl
-workitem/get_sub_group_size.cl
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_global_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_global_id.cl
deleted file mode 100644
index c84991ade76fc..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_global_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_global_id.h>
-
-_CLC_OVERLOAD _CLC_DEF size_t get_global_id(uint dim) {
-  return __clc_get_global_id(dim);
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_group_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_group_id.cl
deleted file mode 100644
index 6fc9832058736..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_group_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_group_id.h>
-
-_CLC_OVERLOAD _CLC_DEF size_t get_group_id(uint dim) {
-  return __clc_get_group_id(dim);
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_id.cl
deleted file mode 100644
index a2b608baf7aeb..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_local_id.h>
-
-_CLC_OVERLOAD _CLC_DEF size_t get_local_id(uint dim) {
-  return __clc_get_local_id(dim);
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_linear_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_linear_id.cl
deleted file mode 100644
index ccb301960726a..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_local_linear_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_local_linear_id.h>
-
-_CLC_OVERLOAD _CLC_DEF size_t get_local_linear_id() {
-  return __clc_get_local_linear_id();
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_max_sub_group_size.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_max_sub_group_size.cl
deleted file mode 100644
index aa06c9561404b..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_max_sub_group_size.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_max_sub_group_size.h>
-
-_CLC_OVERLOAD _CLC_DEF uint get_max_sub_group_size() {
-  return __clc_get_max_sub_group_size();
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_num_sub_groups.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_num_sub_groups.cl
deleted file mode 100644
index e23d78dafa5e8..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_num_sub_groups.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_num_sub_groups.h>
-
-_CLC_OVERLOAD _CLC_DEF uint get_num_sub_groups() {
-  return __clc_get_num_sub_groups();
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_id.cl
deleted file mode 100644
index deb6134cd0704..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_sub_group_id.h>
-
-_CLC_OVERLOAD _CLC_DEF uint get_sub_group_id() {
-  return __clc_get_sub_group_id();
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_local_id.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_local_id.cl
deleted file mode 100644
index 37487fbf3ece8..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_local_id.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_sub_group_local_id.h>
-
-_CLC_OVERLOAD _CLC_DEF uint get_sub_group_local_id() {
-  return __clc_get_sub_group_local_id();
-}
diff --git a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_size.cl b/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_size.cl
deleted file mode 100644
index e6be6f75171d1..0000000000000
--- a/libclc/opencl/lib/ptx-nvidiacl/workitem/get_sub_group_size.cl
+++ /dev/null
@@ -1,13 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include <clc/workitem/clc_get_sub_group_size.h>
-
-_CLC_OVERLOAD _CLC_DEF uint get_sub_group_size() {
-  return __clc_get_sub_group_size();
-}

>From 56207698bc19ee1546a01e6f26505236da56302c Mon Sep 17 00:00:00 2001
From: xlauko <xlauko at mail.muni.cz>
Date: Sat, 7 Mar 2026 08:42:26 +0100
Subject: [PATCH 31/33] [CIR] Add Commutative/Idempotent traits to binary ops

Add missing MLIR traits to CIR binary operations, matching the arith
dialect conventions:

- AndOp, OrOp: Commutative, Idempotent (fixes FIXME)
- AddOp, MulOp, XorOp, MaxOp: Commutative

Add these ops to the CIRCanonicalize pass op list so trait-based
folding is exercised by applyOpPatternsGreedily.

Update testFloatingPointBinOps in binop.cpp to use computed values,
preventing DCE of the now-canonicalized ops.
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 22 ++++---
 .../Dialect/Transforms/CIRCanonicalize.cpp    | 11 ++--
 clang/test/CIR/CodeGen/binop.cpp              | 46 ++++++++-----
 clang/test/CIR/CodeGen/new.cpp                |  9 +--
 clang/test/CIR/CodeGen/size-of-vla.cpp        | 10 +--
 clang/test/CIR/CodeGen/throws.cpp             |  4 +-
 clang/test/CIR/CodeGen/vla.c                  |  8 +--
 clang/test/CIR/Transforms/binop-traits.cir    | 65 +++++++++++++++++++
 8 files changed, 125 insertions(+), 50 deletions(-)
 create mode 100644 clang/test/CIR/Transforms/binop-traits.cir

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 79eef71229192..9035cf1b2005a 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2186,7 +2186,9 @@ class CIR_BinaryOpWithOverflowFlags<string mnemonic, Type type,
 // AddOp
 //===----------------------------------------------------------------------===//
 
-def CIR_AddOp : CIR_BinaryOpWithOverflowFlags<"add", CIR_AnyArithType> {
+def CIR_AddOp : CIR_BinaryOpWithOverflowFlags<"add", CIR_AnyArithType, [
+  Commutative
+]> {
   let summary = "Integer or floating-point addition";
   let description = [{
     The `cir.add` operation performs addition on integer or floating-point
@@ -2241,7 +2243,7 @@ def CIR_SubOp : CIR_BinaryOpWithOverflowFlags<"sub", CIR_AnyArithType> {
 // MulOp
 //===----------------------------------------------------------------------===//
 
-def CIR_MulOp : CIR_BinaryOp<"mul", CIR_AnyArithType> {
+def CIR_MulOp : CIR_BinaryOp<"mul", CIR_AnyArithType, [Commutative]> {
   let summary = "Integer or floating-point multiplication";
   let description = [{
     The `cir.mul` operation performs multiplication on integer or floating-point
@@ -2321,8 +2323,9 @@ def CIR_RemOp : CIR_BinaryOp<"rem", CIR_AnyArithType> {
 // AndOp
 //===----------------------------------------------------------------------===//
 
-// FIXME: Commutative, Idempotent traits
-def CIR_AndOp : CIR_BinaryOp<"and", CIR_AnyBitwiseType> {
+def CIR_AndOp : CIR_BinaryOp<"and", CIR_AnyBitwiseType, [
+  Commutative, Idempotent
+]> {
   let summary = "Bitwise AND";
   let description = [{
     The `cir.and` operation performs a bitwise AND on integer operands.
@@ -2341,8 +2344,9 @@ def CIR_AndOp : CIR_BinaryOp<"and", CIR_AnyBitwiseType> {
 // OrOp
 //===----------------------------------------------------------------------===//
 
-// FIXME: Commutative, Idempotent traits
-def CIR_OrOp : CIR_BinaryOp<"or", CIR_AnyBitwiseType> {
+def CIR_OrOp : CIR_BinaryOp<"or", CIR_AnyBitwiseType, [
+  Commutative, Idempotent
+]> {
   let summary = "Bitwise OR";
   let description = [{
     The `cir.or` operation performs a bitwise OR on integer operands.
@@ -2361,7 +2365,7 @@ def CIR_OrOp : CIR_BinaryOp<"or", CIR_AnyBitwiseType> {
 // XorOp
 //===----------------------------------------------------------------------===//
 
-def CIR_XorOp : CIR_BinaryOp<"xor", CIR_AnyBitwiseType> {
+def CIR_XorOp : CIR_BinaryOp<"xor", CIR_AnyBitwiseType, [Commutative]> {
   let summary = "Bitwise XOR";
   let description = [{
     The `cir.xor` operation performs a bitwise XOR on integer operands.
@@ -2380,7 +2384,9 @@ def CIR_XorOp : CIR_BinaryOp<"xor", CIR_AnyBitwiseType> {
 // MaxOp
 //===----------------------------------------------------------------------===//
 
-def CIR_MaxOp : CIR_BinaryOp<"max", CIR_AnyIntOrVecOfIntType> {
+def CIR_MaxOp : CIR_BinaryOp<"max", CIR_AnyIntOrVecOfIntType, [
+  Commutative, Idempotent
+]> {
   let summary = "Integer maximum";
   let description = [{
     The `cir.max` operation computes the maximum of two integer operands.
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index e9b825606a04e..46a8c011b320b 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -70,11 +70,12 @@ void CIRCanonicalizePass::runOnOperation() {
 
     // Many operations are here to perform a manual `fold` in
     // applyOpPatternsGreedily.
-    if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp,
-            ComplexCreateOp, ComplexImagOp, ComplexRealOp, VecCmpOp,
-            VecCreateOp, VecExtractOp, VecShuffleOp, VecShuffleDynamicOp,
-            VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitFfsOp, BitParityOp,
-            BitPopcountOp, BitReverseOp, ByteSwapOp, RotateOp, ConstantOp>(op))
+    if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp, AddOp,
+            MulOp, AndOp, OrOp, XorOp, MaxOp, ComplexCreateOp, ComplexImagOp,
+            ComplexRealOp, VecCmpOp, VecCreateOp, VecExtractOp, VecShuffleOp,
+            VecShuffleDynamicOp, VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp,
+            BitFfsOp, BitParityOp, BitPopcountOp, BitReverseOp, ByteSwapOp,
+            RotateOp, ConstantOp>(op))
       ops.push_back(op);
   });
 
diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp
index 3d883f14acdc9..33498b90e3a42 100644
--- a/clang/test/CIR/CodeGen/binop.cpp
+++ b/clang/test/CIR/CodeGen/binop.cpp
@@ -127,10 +127,10 @@ void b0(int a, int b) {
 // OGCG:         ret void
 
 void testFloatingPointBinOps(float a, float b) {
-  a * b;
-  a / b;
-  a + b;
-  a - b;
+  float x = a * b;
+  x = x / b;
+  x = x + b;
+  x = x - b;
 }
 
 // CIR-LABEL: cir.func{{.*}} @_Z23testFloatingPointBinOpsff(
@@ -144,48 +144,58 @@ void testFloatingPointBinOps(float a, float b) {
 // LLVM-SAME: float {{.*}} %[[A:.*]], float {{.*}} %[[B:.*]])
 // LLVM:         %[[A_ADDR:.*]] = alloca float, i64 1
 // LLVM:         %[[B_ADDR:.*]] = alloca float, i64 1
+// LLVM:         %[[X_ADDR:.*]] = alloca float, i64 1
 // LLVM:         store float %[[A]], ptr %[[A_ADDR]]
 // LLVM:         store float %[[B]], ptr %[[B_ADDR]]
 
 // LLVM:         %[[A1:.*]] = load float, ptr %[[A_ADDR]]
 // LLVM:         %[[B1:.*]] = load float, ptr %[[B_ADDR]]
-// LLVM:         fmul float %[[A1]], %[[B1]]
+// LLVM:         %[[MUL:.*]] = fmul float %[[A1]], %[[B1]]
+// LLVM:         store float %[[MUL]], ptr %[[X_ADDR]]
 
-// LLVM:         %[[A2:.*]] = load float, ptr %[[A_ADDR]]
+// LLVM:         %[[X1:.*]] = load float, ptr %[[X_ADDR]]
 // LLVM:         %[[B2:.*]] = load float, ptr %[[B_ADDR]]
-// LLVM:         fdiv float %[[A2]], %[[B2]]
+// LLVM:         %[[DIV:.*]] = fdiv float %[[X1]], %[[B2]]
+// LLVM:         store float %[[DIV]], ptr %[[X_ADDR]]
 
-// LLVM:         %[[A3:.*]] = load float, ptr %[[A_ADDR]]
+// LLVM:         %[[X2:.*]] = load float, ptr %[[X_ADDR]]
 // LLVM:         %[[B3:.*]] = load float, ptr %[[B_ADDR]]
-// LLVM:         fadd float %[[A3]], %[[B3]]
+// LLVM:         %[[ADD:.*]] = fadd float %[[X2]], %[[B3]]
+// LLVM:         store float %[[ADD]], ptr %[[X_ADDR]]
 
-// LLVM:         %[[A4:.*]] = load float, ptr %[[A_ADDR]]
+// LLVM:         %[[X3:.*]] = load float, ptr %[[X_ADDR]]
 // LLVM:         %[[B4:.*]] = load float, ptr %[[B_ADDR]]
-// LLVM:         fsub float %[[A4]], %[[B4]]
+// LLVM:         %[[SUB:.*]] = fsub float %[[X3]], %[[B4]]
+// LLVM:         store float %[[SUB]], ptr %[[X_ADDR]]
 
 // LLVM:         ret void
 
 // OGCG-LABEL: define{{.*}} void @_Z23testFloatingPointBinOpsff(float {{.*}} %a, float {{.*}} %b)
 // OGCG:         %a.addr = alloca float
 // OGCG:         %b.addr = alloca float
+// OGCG:         %x = alloca float
 // OGCG:         store float %a, ptr %a.addr
 // OGCG:         store float %b, ptr %b.addr
 
 // OGCG:         %[[A1:.*]] = load float, ptr %a.addr
 // OGCG:         %[[B1:.*]] = load float, ptr %b.addr
-// OGCG:         fmul float %[[A1]], %[[B1]]
+// OGCG:         %[[MUL:.*]] = fmul float %[[A1]], %[[B1]]
+// OGCG:         store float %[[MUL]], ptr %x
 
-// OGCG:         %[[A2:.*]] = load float, ptr %a.addr
+// OGCG:         %[[X1:.*]] = load float, ptr %x
 // OGCG:         %[[B2:.*]] = load float, ptr %b.addr
-// OGCG:         fdiv float %[[A2]], %[[B2]]
+// OGCG:         %[[DIV:.*]] = fdiv float %[[X1]], %[[B2]]
+// OGCG:         store float %[[DIV]], ptr %x
 
-// OGCG:         %[[A3:.*]] = load float, ptr %a.addr
+// OGCG:         %[[X2:.*]] = load float, ptr %x
 // OGCG:         %[[B3:.*]] = load float, ptr %b.addr
-// OGCG:         fadd float %[[A3]], %[[B3]]
+// OGCG:         %[[ADD:.*]] = fadd float %[[X2]], %[[B3]]
+// OGCG:         store float %[[ADD]], ptr %x
 
-// OGCG:         %[[A4:.*]] = load float, ptr %a.addr
+// OGCG:         %[[X3:.*]] = load float, ptr %x
 // OGCG:         %[[B4:.*]] = load float, ptr %b.addr
-// OGCG:         fsub float %[[A4]], %[[B4]]
+// OGCG:         %[[SUB:.*]] = fsub float %[[X3]], %[[B4]]
+// OGCG:         store float %[[SUB]], ptr %x
 
 // OGCG:         ret void
 
diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp
index 20fe26ac833e0..74aa74a8d51af 100644
--- a/clang/test/CIR/CodeGen/new.cpp
+++ b/clang/test/CIR/CodeGen/new.cpp
@@ -551,19 +551,14 @@ void t_new_var_size5(int n) {
   auto p = new double[n][2][3];
 }
 
-// NUM_ELEMENTS isn't used in this case because there is no cookie. It isn't
-// used in the allocation size because the allocation size is calculated with
-// the element size and the fixed size dimensions already combined (6 * 8 = 48).
-// We don't DCE NUM_ELEMENTS because it's not a constant, but later
-// optimizations will eliminate it.
+// The allocation size is calculated with the element size and the fixed size
+// dimensions already combined (6 * 8 = 48).
 
 // CHECK:  cir.func {{.*}} @_Z15t_new_var_size5i
 // CHECK:    %[[N:.*]] = cir.load{{.*}} %[[ARG_ALLOCA:.*]]
 // CHECK:    %[[N_SIZE_T:.*]] = cir.cast integral %[[N]] : !s32i -> !u64i
 // CHECK:    %[[ELEMENT_SIZE:.*]] = cir.const #cir.int<48> : !u64i
 // CHECK:    %[[RESULT:.*]], %[[OVERFLOW:.*]] = cir.binop.overflow(mul, %[[N_SIZE_T]], %[[ELEMENT_SIZE]]) : !u64i, (!u64i, !cir.bool)
-// CHECK:    %[[NUM_ELEMENTS_MULTIPLIER:.*]] = cir.const #cir.int<6>
-// CHECK:    %[[NUM_ELEMENTS:.*]] = cir.mul %[[N_SIZE_T]], %[[NUM_ELEMENTS_MULTIPLIER]] : !u64i
 // CHECK:    %[[ALL_ONES:.*]] = cir.const #cir.int<18446744073709551615> : !u64i
 // CHECK:    %[[ALLOC_SIZE:.*]] = cir.select if %[[OVERFLOW]] then %[[ALL_ONES]] else %[[RESULT]] : (!cir.bool, !u64i, !u64i)
 // CHECK:    %[[PTR:.*]] = cir.call @_Znam(%[[ALLOC_SIZE]]) {allocsize = array<i32: 0>} : (!u64i {llvm.noundef})
diff --git a/clang/test/CIR/CodeGen/size-of-vla.cpp b/clang/test/CIR/CodeGen/size-of-vla.cpp
index 789e97f40440b..3f9b2f5fecbbd 100644
--- a/clang/test/CIR/CodeGen/size-of-vla.cpp
+++ b/clang/test/CIR/CodeGen/size-of-vla.cpp
@@ -38,16 +38,16 @@ void vla_type_with_element_type_int() {
 // CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
 // CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
 // CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
-// CIR: %3 = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
+// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
 // CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
-// CIR: %[[SIZE:.*]] = cir.mul nuw %[[CONST_4]], %3 : !u64i
+// CIR: %[[SIZE:.*]] = cir.mul nuw %[[TMP_N]], %[[CONST_4]] : !u64i
 // CIR: cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
 
 // LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
 // LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
 // LLVM: store i64 10, ptr %[[N_ADDR]], align 8
 // LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
-// LLVM: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
+// LLVM: %[[SIZE:.*]] = mul nuw i64 %[[TMP_N]], 4
 // LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
 
 // OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
@@ -126,7 +126,7 @@ void vla_expr_element_type_int() {
 // CIR: cir.cleanup.scope {
 // CIR:   %[[ARR_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TMP_N]] : !u64i, ["arr"]
 // CIR:   %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
-// CIR:   %[[SIZE:.*]] = cir.mul nuw %[[CONST_4]], %[[TMP_N]] : !u64i
+// CIR:   %[[SIZE:.*]] = cir.mul nuw %[[TMP_N]], %[[CONST_4]] : !u64i
 // CIR:   cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
 // CIR:   cir.yield
 // CIR: } cleanup normal {
@@ -143,7 +143,7 @@ void vla_expr_element_type_int() {
 // LLVM: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
 // LLVM: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
 // LLVM: %[[ARR_ADDR:.*]] = alloca i32, i64 %[[TMP_N]], align 16
-// LLVM: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
+// LLVM: %[[SIZE:.*]] = mul nuw i64 %[[TMP_N]], 4
 // LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
 // LLVM: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
 // LLVM: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])
diff --git a/clang/test/CIR/CodeGen/throws.cpp b/clang/test/CIR/CodeGen/throws.cpp
index 64cf0f6d62b0a..14b1d1272217c 100644
--- a/clang/test/CIR/CodeGen/throws.cpp
+++ b/clang/test/CIR/CodeGen/throws.cpp
@@ -112,9 +112,7 @@ void paren_expr() { (throw 0, 1 + 2); }
 // CIR:   cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, @_ZTIi
 // CIR:   cir.unreachable
 // CIR: ^bb1:
-// CIR:   %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
-// CIR:   %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
-// CIR:   %[[ADD:.*]] = cir.add nsw %[[CONST_1]], %[[CONST_2]] : !s32i
+// CIR:   cir.return
 
 // LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
 // LLVM: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index 2eb8d3c3814af..f30a438834660 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -62,7 +62,7 @@ void f1(int len) {
 // CIR:   %[[LEN_SIZE_T:.*]] = cir.cast integral %[[LEN]] : !s32i -> !u64i
 // CIR:   %[[STACK_PTR:.*]] = cir.stacksave
 // CIR:   cir.store{{.*}} %[[STACK_PTR]], %[[SAVED_STACK]]
-// CIR:   %[[TOTAL_LEN:.*]] = cir.mul nuw %[[SIXTEEN]], %[[LEN_SIZE_T]]
+// CIR:   %[[TOTAL_LEN:.*]] = cir.mul nuw %[[LEN_SIZE_T]], %[[SIXTEEN]]
 // CIR:   %[[ARR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TOTAL_LEN]] : !u64i, ["arr"]
 // CIR:   %[[STACK_RESTORE_PTR:.*]] = cir.load{{.*}} %[[SAVED_STACK]]
 // CIR:   cir.stackrestore %[[STACK_RESTORE_PTR]]
@@ -75,7 +75,7 @@ void f1(int len) {
 // LLVM:   %[[LEN_SIZE_T:.*]] = sext i32 %[[LEN]] to i64
 // LLVM:   %[[STACK_PTR:.*]] = call ptr @llvm.stacksave.p0()
 // LLVM:   store ptr %[[STACK_PTR]], ptr %[[SAVED_STACK]]
-// LLVM:   %[[TOTAL_LEN:.*]] = mul nuw i64 16, %[[LEN_SIZE_T]]
+// LLVM:   %[[TOTAL_LEN:.*]] = mul nuw i64 %[[LEN_SIZE_T]], 16
 // LLVM:   %[[ARR:.*]] = alloca i32, i64 %[[TOTAL_LEN]]
 // LLVM:   %[[STACK_RESTORE_PTR:.*]] = load ptr, ptr %[[SAVED_STACK]]
 // LLVM:   call void @llvm.stackrestore.p0(ptr %[[STACK_RESTORE_PTR]])
@@ -361,7 +361,7 @@ void vla_subscript_expr() {
 // CIR: %[[COMPOUND_PTR:.*]] = cir.ptr_stride %[[TMP_COMPOUND]], %[[CONST_0]] : (!cir.ptr<!cir.ptr<!s32i>>, !s32i) -> !cir.ptr<!cir.ptr<!s32i>>
 // CIR: %[[TMP_COMPOUND:.*]] = cir.load {{.*}} %10 : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
 // CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !u64i
-// CIR: %[[VLA_IDX:.*]] = cir.mul nsw %[[CONST_1]], %7 : !u64i
+// CIR: %[[VLA_IDX:.*]] = cir.mul nsw %[[TMP_N]], %[[CONST_1]] : !u64i
 // CIR: %[[VLA_A_PTR:.*]] = cir.ptr_stride %[[TMP_COMPOUND]], %[[VLA_IDX]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
 // CIR: %[[ELEM_5_PTR:.*]] = cir.ptr_stride %[[VLA_A_PTR]], %[[CONST_5]] : (!cir.ptr<!s32i>, !s32i) -> !cir.ptr<!s32i>
 // CIR: cir.store {{.*}} %[[CONST_0_VAL]], %[[ELEM_5_PTR]] : !s32i, !cir.ptr<!s32i>
@@ -375,7 +375,7 @@ void vla_subscript_expr() {
 // LLVM: %[[TMP_COMPOUND:.*]] = load ptr, ptr %[[COMPOUND_ADDR]], align 8
 // LLVM: %[[COMPOUND_PTR:.*]] = getelementptr ptr, ptr %[[TMP_COMPOUND]], i64 0
 // LLVM: %[[TMP_COMPOUND:.*]] = load ptr, ptr %[[COMPOUND_PTR]], align 8
-// LLVM: %[[VLA_IDX:.*]] = mul nsw i64 1, %[[TMP_N]]
+// LLVM: %[[VLA_IDX:.*]] = mul nsw i64 %[[TMP_N]], 1
 // LLVM: %[[VLA_A_PTR:.*]] = getelementptr i32, ptr %[[TMP_COMPOUND]], i64 %[[VLA_IDX]]
 // LLVM: %[[ELEM_5_PTR:.*]] = getelementptr i32, ptr %[[VLA_A_PTR]], i64 5
 // LLVM: store i32 0, ptr %[[ELEM_5_PTR]], align 4
diff --git a/clang/test/CIR/Transforms/binop-traits.cir b/clang/test/CIR/Transforms/binop-traits.cir
new file mode 100644
index 0000000000000..df2b12cf9f178
--- /dev/null
+++ b/clang/test/CIR/Transforms/binop-traits.cir
@@ -0,0 +1,65 @@
+// RUN: cir-opt %s -cir-canonicalize -o - | FileCheck %s
+
+!s32i = !cir.int<s, 32>
+
+// CHECK-LABEL: @and_idempotent
+// CHECK-NEXT: cir.return %arg0
+cir.func @and_idempotent(%arg0 : !s32i) -> !s32i {
+  %0 = cir.and %arg0, %arg0 : !s32i
+  cir.return %0 : !s32i
+}
+
+// CHECK-LABEL: @or_idempotent
+// CHECK-NEXT: cir.return %arg0
+cir.func @or_idempotent(%arg0 : !s32i) -> !s32i {
+  %0 = cir.or %arg0, %arg0 : !s32i
+  cir.return %0 : !s32i
+}
+
+// CHECK-LABEL: @and_commutative
+// CHECK: cir.and %arg0, %{{.*}} : !s32i
+cir.func @and_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.and %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}
+
+// CHECK-LABEL: @or_commutative
+// CHECK: cir.or %arg0, %{{.*}} : !s32i
+cir.func @or_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.or %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}
+
+// CHECK-LABEL: @xor_commutative
+// CHECK: cir.xor %arg0, %{{.*}} : !s32i
+cir.func @xor_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.xor %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}
+
+// CHECK-LABEL: @add_commutative
+// CHECK: cir.add %arg0, %{{.*}} : !s32i
+cir.func @add_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.add %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}
+
+// CHECK-LABEL: @mul_commutative
+// CHECK: cir.mul %arg0, %{{.*}} : !s32i
+cir.func @mul_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.mul %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}
+
+// CHECK-LABEL: @max_commutative
+// CHECK: cir.max %arg0, %{{.*}} : !s32i
+cir.func @max_commutative(%arg0 : !s32i) -> !s32i {
+  %0 = cir.const #cir.int<42> : !s32i
+  %1 = cir.max %0, %arg0 : !s32i
+  cir.return %1 : !s32i
+}

>From 78386644da1a34aba3894ffe24942a0899fac524 Mon Sep 17 00:00:00 2001
From: xlauko <xlauko at mail.muni.cz>
Date: Sat, 7 Mar 2026 14:19:44 +0100
Subject: [PATCH 32/33] [CIR] Remove cir.unary(plus, ...) and emit nothing for
 unary plus
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Traditional codegen never emits any operation for unary plus — it just
visits the subexpression as a pure identity at the codegen level. Align
CIRGen with this behavior by removing Plus from UnaryOpKind entirely
and having VisitUnaryPlus directly visit the subexpression with the
appropriate promotion/demotion handling.
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |   7 +-
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp   |  34 ++---
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    |  26 ++--
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp       |  11 +-
 .../Dialect/Transforms/LoweringPrepare.cpp    |   1 -
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |  24 +---
 clang/lib/CodeGen/CGExprComplex.cpp           | 135 +++++++++---------
 clang/test/CIR/CodeGen/complex-unary.cpp      |  36 ++---
 clang/test/CIR/CodeGen/complex.cpp            |  20 +--
 clang/test/CIR/CodeGen/coro-task.cpp          |   3 +-
 .../CIR/CodeGen/lambda-static-invoker.cpp     |   3 +-
 clang/test/CIR/CodeGen/unary.cpp              |  15 +-
 clang/test/CIR/CodeGen/vector-ext.cpp         |   3 +-
 clang/test/CIR/CodeGen/vector.cpp             |   3 +-
 clang/test/CIR/IR/unary.cir                   |  36 +++--
 clang/test/CIR/Transforms/canonicalize.cir    |  72 ----------
 16 files changed, 146 insertions(+), 283 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 9035cf1b2005a..5d21a622d5e29 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1709,16 +1709,15 @@ def CIR_LabelOp : CIR_Op<"label", [AlwaysSpeculatable]> {
 def CIR_UnaryOpKind : CIR_I32EnumAttr<"UnaryOpKind", "unary operation kind", [
   I32EnumAttrCase<"Inc",   0, "inc">,
   I32EnumAttrCase<"Dec",   1, "dec">,
-  I32EnumAttrCase<"Plus",  2, "plus">,
-  I32EnumAttrCase<"Minus", 3, "minus">,
-  I32EnumAttrCase<"Not",   4, "not">
+  I32EnumAttrCase<"Minus", 2, "minus">,
+  I32EnumAttrCase<"Not",   3, "not">
 ]>;
 
 def CIR_UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> {
   let summary = "Unary operations";
   let description = [{
     `cir.unary` performs the unary operation according to
-    the specified opcode kind: [inc, dec, plus, minus, not].
+    the specified opcode kind: [inc, dec, minus, not].
 
     It requires one input operand and has one result, both types
     should be the same.
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index ff5a58a8eb122..1d05f03491d40 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -193,9 +193,9 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   mlir::Value VisitUnaryDeref(const Expr *e) { return emitLoadOfLValue(e); }
 
   mlir::Value VisitUnaryPlus(const UnaryOperator *e);
+  mlir::Value VisitUnaryPlus(const UnaryOperator *e, QualType promotionType);
   mlir::Value VisitUnaryMinus(const UnaryOperator *e);
-  mlir::Value VisitPlusMinus(const UnaryOperator *e, cir::UnaryOpKind kind,
-                             QualType promotionType);
+  mlir::Value VisitUnaryMinus(const UnaryOperator *e, QualType promotionType);
   mlir::Value VisitUnaryNot(const UnaryOperator *e);
   // LNot,Real,Imag never return complex.
   mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
@@ -574,32 +574,36 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
 
 mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
-  mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
+  mlir::Value result = VisitUnaryPlus(e, promotionTy);
   if (!promotionTy.isNull())
     return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }
 
+mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e,
+                                               QualType promotionType) {
+  if (!promotionType.isNull())
+    return cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
+  return Visit(e->getSubExpr());
+}
+
 mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
-  mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
+  mlir::Value result = VisitUnaryMinus(e, promotionTy);
   if (!promotionTy.isNull())
     return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }
 
-mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
-                                               cir::UnaryOpKind kind,
-                                               QualType promotionType) {
-  assert((kind == cir::UnaryOpKind::Plus || kind == cir::UnaryOpKind::Minus) &&
-         "Invalid UnaryOp kind for ComplexType Plus or Minus");
-
+mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e,
+                                                QualType promotionType) {
   mlir::Value op;
   if (!promotionType.isNull())
     op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
   else
     op = Visit(e->getSubExpr());
-  return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
+  return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()),
+                               cir::UnaryOpKind::Minus, op);
 }
 
 mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *e) {
@@ -766,12 +770,10 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
     }
   } else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
     switch (unaryOp->getOpcode()) {
+    case UO_Plus:
+      return VisitUnaryPlus(unaryOp, promotionTy);
     case UO_Minus:
-    case UO_Plus: {
-      auto kind = unaryOp->getOpcode() == UO_Plus ? cir::UnaryOpKind::Plus
-                                                  : cir::UnaryOpKind::Minus;
-      return VisitPlusMinus(unaryOp, kind, promotionTy);
-    }
+      return VisitUnaryMinus(unaryOp, promotionTy);
     default:
       break;
     }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 1b5ba5ee6783f..29aa35e762f45 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -766,25 +766,28 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
   mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
     QualType promotionType = getPromotionType(e->getSubExpr()->getType());
-    mlir::Value result =
-        emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
+    mlir::Value result = VisitUnaryPlus(e, promotionType);
     if (result && !promotionType.isNull())
       return emitUnPromotedValue(result, e->getType());
     return result;
   }
 
+  mlir::Value VisitUnaryPlus(const UnaryOperator *e, QualType promotionType) {
+    ignoreResultAssign = false;
+    if (!promotionType.isNull())
+      return cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
+    return Visit(e->getSubExpr());
+  }
+
   mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
     QualType promotionType = getPromotionType(e->getSubExpr()->getType());
-    mlir::Value result =
-        emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
+    mlir::Value result = VisitUnaryMinus(e, promotionType);
     if (result && !promotionType.isNull())
       return emitUnPromotedValue(result, e->getType());
     return result;
   }
 
-  mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
-                                   cir::UnaryOpKind kind,
-                                   QualType promotionType) {
+  mlir::Value VisitUnaryMinus(const UnaryOperator *e, QualType promotionType) {
     ignoreResultAssign = false;
     mlir::Value operand;
     if (!promotionType.isNull())
@@ -795,14 +798,13 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     // TODO(cir): We might have to change this to support overflow trapping.
     //            Classic codegen routes unary minus through emitSub to ensure
     //            that the overflow behavior is handled correctly.
-    bool nsw = kind == cir::UnaryOpKind::Minus &&
-               e->getType()->isSignedIntegerType() &&
+    bool nsw = e->getType()->isSignedIntegerType() &&
                cgf.getLangOpts().getSignedOverflowBehavior() !=
                    LangOptions::SOB_Defined;
 
     // NOTE: LLVM codegen will lower this directly to either a FNeg
     // or a Sub instruction.  In CIR this will be handled later in LowerToLLVM.
-    return emitUnaryOp(e, kind, operand, nsw);
+    return emitUnaryOp(e, cir::UnaryOpKind::Minus, operand, nsw);
   }
 
   mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
@@ -1560,9 +1562,9 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
     case UO_Real:
       return VisitRealImag(uo, promotionType);
     case UO_Minus:
-      return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
+      return VisitUnaryMinus(uo, promotionType);
     case UO_Plus:
-      return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
+      return VisitUnaryPlus(uo, promotionType);
     default:
       break;
     }
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 8d2990af5de8c..87cf3950b04d6 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2671,7 +2671,6 @@ LogicalResult cir::UnaryOp::verify() {
   switch (getKind()) {
   case cir::UnaryOpKind::Inc:
   case cir::UnaryOpKind::Dec:
-  case cir::UnaryOpKind::Plus:
   case cir::UnaryOpKind::Minus:
   case cir::UnaryOpKind::Not:
     // Nothing to verify.
@@ -2712,9 +2711,8 @@ OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
   // input attribute from the adapter, a new constant is materialized, but
   // if we return the input value directly, it avoids that.
   if (auto srcConst = getInput().getDefiningOp<cir::ConstantOp>()) {
-    if (getKind() == cir::UnaryOpKind::Plus ||
-        (mlir::isa<cir::BoolType>(srcConst.getType()) &&
-         getKind() == cir::UnaryOpKind::Minus))
+    if (mlir::isa<cir::BoolType>(srcConst.getType()) &&
+        getKind() == cir::UnaryOpKind::Minus)
       return srcConst.getResult();
   }
 
@@ -2733,8 +2731,6 @@ OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
                 val.flipAllBits();
                 return cir::IntAttr::get(getType(), val);
               }
-              case cir::UnaryOpKind::Plus:
-                return attrT;
               case cir::UnaryOpKind::Minus: {
                 APInt val = attrT.getValue();
                 val.negate();
@@ -2746,8 +2742,6 @@ OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
             })
             .Case<cir::FPAttr>([&](cir::FPAttr attrT) {
               switch (getKind()) {
-              case cir::UnaryOpKind::Plus:
-                return attrT;
               case cir::UnaryOpKind::Minus: {
                 APFloat val = attrT.getValue();
                 val.changeSign();
@@ -2761,7 +2755,6 @@ OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
               switch (getKind()) {
               case cir::UnaryOpKind::Not:
                 return cir::BoolAttr::get(getContext(), !attrT.getValue());
-              case cir::UnaryOpKind::Plus:
               case cir::UnaryOpKind::Minus:
                 return attrT;
               default:
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 232d320d71f37..ad6c49ca87183 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -822,7 +822,6 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
     resultImag = operandImag;
     break;
 
-  case cir::UnaryOpKind::Plus:
   case cir::UnaryOpKind::Minus:
     resultReal = builder.createUnaryOp(loc, opKind, operandReal);
     resultImag = builder.createUnaryOp(loc, opKind, operandImag);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 8bcd040382ab3..58e7c8a65a1cd 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2726,7 +2726,7 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
   mlir::Type llvmType = getTypeConverter()->convertType(type);
   mlir::Location loc = op.getLoc();
 
-  // Integer unary operations: + - ~ ++ --
+  // Integer unary operations: - ~ ++ --
   if (mlir::isa<cir::IntType>(elementType)) {
     mlir::LLVM::IntegerOverflowFlags maybeNSW =
         op.getNoSignedWrap() ? mlir::LLVM::IntegerOverflowFlags::nsw
@@ -2746,9 +2746,6 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
                                                      one, maybeNSW);
       return mlir::success();
     }
-    case cir::UnaryOpKind::Plus:
-      rewriter.replaceOp(op, adaptor.getInput());
-      return mlir::success();
     case cir::UnaryOpKind::Minus: {
       mlir::Value zero;
       if (isVector)
@@ -2780,7 +2777,7 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
     llvm_unreachable("Unexpected unary op for int");
   }
 
-  // Floating point unary operations: + - ++ --
+  // Floating point unary operations: - ++ --
   if (mlir::isa<cir::FPTypeInterface>(elementType)) {
     switch (op.getKind()) {
     case cir::UnaryOpKind::Inc: {
@@ -2799,9 +2796,6 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
                                                       adaptor.getInput());
       return mlir::success();
     }
-    case cir::UnaryOpKind::Plus:
-      rewriter.replaceOp(op, adaptor.getInput());
-      return mlir::success();
     case cir::UnaryOpKind::Minus:
       rewriter.replaceOpWithNewOp<mlir::LLVM::FNegOp>(op, llvmType,
                                                       adaptor.getInput());
@@ -2818,7 +2812,6 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
     switch (op.getKind()) {
     case cir::UnaryOpKind::Inc:
     case cir::UnaryOpKind::Dec:
-    case cir::UnaryOpKind::Plus:
     case cir::UnaryOpKind::Minus:
       // Some of these are allowed in source code, but we shouldn't get here
       // with a boolean type.
@@ -2834,19 +2827,6 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
     llvm_unreachable("Unexpected unary op for bool");
   }
 
-  // Pointer unary operations: + only.  (++ and -- of pointers are implemented
-  // with cir.ptr_stride, not cir.unary.)
-  if (mlir::isa<cir::PointerType>(elementType)) {
-    switch (op.getKind()) {
-    case cir::UnaryOpKind::Plus:
-      rewriter.replaceOp(op, adaptor.getInput());
-      return mlir::success();
-    default:
-      op.emitError() << "Unknown pointer unary operation during CIR lowering";
-      return mlir::failure();
-    }
-  }
-
   return op.emitError() << "Unary operation has unsupported type: "
                         << elementType;
 }
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 54f18064c8cbc..92129a041fd8e 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -39,9 +39,9 @@ static const ComplexType *getComplexType(QualType type) {
   }
 }
 
-namespace  {
+namespace {
 class ComplexExprEmitter
-  : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+    : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
   CodeGenFunction &CGF;
   CGBuilderTy &Builder;
   bool IgnoreReal;
@@ -108,7 +108,9 @@ class ComplexExprEmitter
                            Result->getAggregateElement(1U));
     return Visit(E->getSubExpr());
   }
-  ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+  ComplexPairTy VisitParenExpr(ParenExpr *PE) {
+    return Visit(PE->getSubExpr());
+  }
   ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
     return Visit(GE->getResultExpr());
   }
@@ -185,15 +187,15 @@ class ComplexExprEmitter
     if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
       CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
     if (E->changesVolatileQualification())
-       return EmitLoadOfLValue(E);
+      return EmitLoadOfLValue(E);
     return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
   }
   ComplexPairTy VisitCallExpr(const CallExpr *E);
   ComplexPairTy VisitStmtExpr(const StmtExpr *E);
 
   // Operators.
-  ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
-                                   bool isInc, bool isPre) {
+  ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E, bool isInc,
+                                   bool isPre) {
     LValue LV = CGF.EmitLValue(E->getSubExpr());
     return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
   }
@@ -217,7 +219,7 @@ class ComplexExprEmitter
   ComplexPairTy VisitUnaryMinus(const UnaryOperator *E,
                                 QualType PromotionType = QualType());
   ComplexPairTy VisitMinus(const UnaryOperator *E, QualType PromotionType);
-  ComplexPairTy VisitUnaryNot      (const UnaryOperator *E);
+  ComplexPairTy VisitUnaryNot(const UnaryOperator *E);
   // LNot,Real,Imag never return complex.
   ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
     return Visit(E->getSubExpr());
@@ -247,15 +249,14 @@ class ComplexExprEmitter
   ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
     assert(E->getType()->isAnyComplexType() && "Expected complex type!");
     QualType Elem = E->getType()->castAs<ComplexType>()->getElementType();
-    llvm::Constant *Null =
-                       llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+    llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
     return ComplexPairTy(Null, Null);
   }
 
   struct BinOpInfo {
     ComplexPairTy LHS;
     ComplexPairTy RHS;
-    QualType Ty;  // Computation Type.
+    QualType Ty; // Computation Type.
     FPOptions FPFeatures;
   };
 
@@ -263,13 +264,13 @@ class ComplexExprEmitter
                        QualType PromotionTy = QualType());
   ComplexPairTy EmitPromoted(const Expr *E, QualType PromotionTy);
   ComplexPairTy EmitPromotedComplexOperand(const Expr *E, QualType PromotionTy);
-  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
-                                  ComplexPairTy (ComplexExprEmitter::*Func)
-                                  (const BinOpInfo &),
-                                  RValue &Val);
-  ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
-                                   ComplexPairTy (ComplexExprEmitter::*Func)
-                                   (const BinOpInfo &));
+  LValue EmitCompoundAssignLValue(
+      const CompoundAssignOperator *E,
+      ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo &),
+      RValue &Val);
+  ComplexPairTy EmitCompoundAssign(
+      const CompoundAssignOperator *E,
+      ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo &));
 
   ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
   ComplexPairTy EmitBinSub(const BinOpInfo &Op);
@@ -381,11 +382,9 @@ class ComplexExprEmitter
 
   // No comparisons produce a complex result.
 
-  LValue EmitBinAssignLValue(const BinaryOperator *E,
-                             ComplexPairTy &Val);
-  ComplexPairTy VisitBinAssign     (const BinaryOperator *E);
-  ComplexPairTy VisitBinComma      (const BinaryOperator *E);
-
+  LValue EmitBinAssignLValue(const BinaryOperator *E, ComplexPairTy &Val);
+  ComplexPairTy VisitBinAssign(const BinaryOperator *E);
+  ComplexPairTy VisitBinComma(const BinaryOperator *E);
 
   ComplexPairTy
   VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
@@ -407,7 +406,7 @@ class ComplexExprEmitter
     return Visit(E->getSelectedExpr());
   }
 };
-}  // end anonymous namespace.
+} // end anonymous namespace.
 
 //===----------------------------------------------------------------------===//
 //                                Utilities
@@ -469,8 +468,6 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
   CGF.addInstToCurrentSourceAtom(I, Val.second);
 }
 
-
-
 //===----------------------------------------------------------------------===//
 //                            Visitor Methods
 //===----------------------------------------------------------------------===//
@@ -478,18 +475,17 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
 ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
   CGF.ErrorUnsupported(E, "complex expression");
   llvm::Type *EltTy =
-    CGF.ConvertType(getComplexType(E->getType())->getElementType());
+      CGF.ConvertType(getComplexType(E->getType())->getElementType());
   llvm::Value *U = llvm::PoisonValue::get(EltTy);
   return ComplexPairTy(U, U);
 }
 
-ComplexPairTy ComplexExprEmitter::
-VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ComplexPairTy
+ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
   llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
   return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
 }
 
-
 ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
   if (E->getCallReturnType(CGF.getContext())->isReferenceType())
     return EmitLoadOfLValue(E);
@@ -539,7 +535,8 @@ ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
 ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
                                            QualType DestTy) {
   switch (CK) {
-  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+  case CK_Dependent:
+    llvm_unreachable("dependent cast kind in IR gen!");
 
   // Atomic to non-atomic casts may be more than a no-op for some platforms and
   // for some types.
@@ -691,10 +688,10 @@ ComplexPairTy ComplexExprEmitter::VisitMinus(const UnaryOperator *E,
 
   llvm::Value *ResR, *ResI;
   if (Op.first->getType()->isFloatingPointTy()) {
-    ResR = Builder.CreateFNeg(Op.first,  "neg.r");
+    ResR = Builder.CreateFNeg(Op.first, "neg.r");
     ResI = Builder.CreateFNeg(Op.second, "neg.i");
   } else {
-    ResR = Builder.CreateNeg(Op.first,  "neg.r");
+    ResR = Builder.CreateNeg(Op.first, "neg.r");
     ResI = Builder.CreateNeg(Op.second, "neg.i");
   }
   return ComplexPairTy(ResR, ResI);
@@ -719,14 +716,14 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
 
   if (Op.LHS.first->getType()->isFloatingPointTy()) {
     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
-    ResR = Builder.CreateFAdd(Op.LHS.first,  Op.RHS.first,  "add.r");
+    ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
     if (Op.LHS.second && Op.RHS.second)
       ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
     else
       ResI = Op.LHS.second ? Op.LHS.second : Op.RHS.second;
     assert(ResI && "Only one operand may be real!");
   } else {
-    ResR = Builder.CreateAdd(Op.LHS.first,  Op.RHS.first,  "add.r");
+    ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
     assert(Op.LHS.second && Op.RHS.second &&
            "Both operands of integer complex operators must be complex!");
     ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
@@ -884,11 +881,13 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
       // Finally continue execution by phi-ing together the different
       // computation paths.
       CGF.EmitBlock(ContBB);
-      llvm::PHINode *RealPHI = Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
+      llvm::PHINode *RealPHI =
+          Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
       RealPHI->addIncoming(ResR, OrigBB);
       RealPHI->addIncoming(ResR, INaNBB);
       RealPHI->addIncoming(LibCallR, LibCallBB);
-      llvm::PHINode *ImagPHI = Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
+      llvm::PHINode *ImagPHI =
+          Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
       ImagPHI->addIncoming(ResI, OrigBB);
       ImagPHI->addIncoming(ResI, INaNBB);
       ImagPHI->addIncoming(LibCallI, LibCallBB);
@@ -1097,7 +1096,9 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
     llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
     llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
 
-    if (Op.Ty->castAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+    if (Op.Ty->castAs<ComplexType>()
+            ->getElementType()
+            ->isUnsignedIntegerType()) {
       DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
       DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
     } else {
@@ -1209,11 +1210,9 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E,
   return Ops;
 }
 
-
-LValue ComplexExprEmitter::
-EmitCompoundAssignLValue(const CompoundAssignOperator *E,
-          ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
-                         RValue &Val) {
+LValue ComplexExprEmitter::EmitCompoundAssignLValue(
+    const CompoundAssignOperator *E,
+    ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val) {
   TestAndClearIgnoreReal();
   TestAndClearIgnoreImag();
   QualType LHSTy = E->getLHS()->getType();
@@ -1323,9 +1322,9 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
 }
 
 // Compound assignments.
-ComplexPairTy ComplexExprEmitter::
-EmitCompoundAssign(const CompoundAssignOperator *E,
-                   ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ComplexPairTy ComplexExprEmitter::EmitCompoundAssign(
+    const CompoundAssignOperator *E,
+    ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo &)) {
   RValue Val;
   LValue LV = EmitCompoundAssignLValue(E, Func, Val);
 
@@ -1381,8 +1380,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
   return Visit(E->getRHS());
 }
 
-ComplexPairTy ComplexExprEmitter::
-VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ComplexPairTy ComplexExprEmitter::VisitAbstractConditionalOperator(
+    const AbstractConditionalOperator *E) {
   TestAndClearIgnoreReal();
   TestAndClearIgnoreImag();
   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
@@ -1392,7 +1391,6 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
   // Bind the common expression if necessary.
   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
 
-
   CodeGenFunction::ConditionalEvaluation eval(CGF);
   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                            CGF.getProfileCount(E));
@@ -1431,12 +1429,12 @@ ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
 }
 
 ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
-    bool Ignore = TestAndClearIgnoreReal();
-    (void)Ignore;
-    assert (Ignore == false && "init list ignored");
-    Ignore = TestAndClearIgnoreImag();
-    (void)Ignore;
-    assert (Ignore == false && "init list ignored");
+  bool Ignore = TestAndClearIgnoreReal();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
+  Ignore = TestAndClearIgnoreImag();
+  (void)Ignore;
+  assert(Ignore == false && "init list ignored");
 
   if (E->getNumInits() == 2) {
     llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
@@ -1449,8 +1447,8 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
   // Empty init list initializes to null
   assert(E->getNumInits() == 0 && "Unexpected number of inits");
   QualType Ty = E->getType()->castAs<ComplexType>()->getElementType();
-  llvm::Type* LTy = CGF.ConvertType(Ty);
-  llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+  llvm::Type *LTy = CGF.ConvertType(Ty);
+  llvm::Value *zeroConstant = llvm::Constant::getNullValue(LTy);
   return ComplexPairTy(zeroConstant, zeroConstant);
 }
 
@@ -1461,7 +1459,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
   if (!ArgValue.isValid()) {
     CGF.ErrorUnsupported(E, "complex va_arg expression");
     llvm::Type *EltTy =
-      CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
+        CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
     llvm::Value *U = llvm::PoisonValue::get(EltTy);
     return ComplexPairTy(U, U);
   }
@@ -1489,7 +1487,7 @@ void CodeGenFunction::EmitComplexExprIntoLValue(const Expr *E, LValue dest,
   assert(E && getComplexType(E->getType()) &&
          "Invalid complex expression to emit");
   ComplexExprEmitter Emitter(*this);
-  ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+  ComplexPairTy Val = Emitter.Visit(const_cast<Expr *>(E));
   Emitter.EmitStoreOfComplex(Val, dest, isInit);
 }
 
@@ -1520,26 +1518,29 @@ typedef ComplexPairTy (ComplexExprEmitter::*CompoundFunc)(
 
 static CompoundFunc getComplexOp(BinaryOperatorKind Op) {
   switch (Op) {
-  case BO_MulAssign: return &ComplexExprEmitter::EmitBinMul;
-  case BO_DivAssign: return &ComplexExprEmitter::EmitBinDiv;
-  case BO_SubAssign: return &ComplexExprEmitter::EmitBinSub;
-  case BO_AddAssign: return &ComplexExprEmitter::EmitBinAdd;
+  case BO_MulAssign:
+    return &ComplexExprEmitter::EmitBinMul;
+  case BO_DivAssign:
+    return &ComplexExprEmitter::EmitBinDiv;
+  case BO_SubAssign:
+    return &ComplexExprEmitter::EmitBinSub;
+  case BO_AddAssign:
+    return &ComplexExprEmitter::EmitBinAdd;
   default:
     llvm_unreachable("unexpected complex compound assignment");
   }
 }
 
-LValue CodeGenFunction::
-EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+LValue CodeGenFunction::EmitComplexCompoundAssignmentLValue(
+    const CompoundAssignOperator *E) {
   ApplyAtomGroup Grp(getDebugInfo());
   CompoundFunc Op = getComplexOp(E->getOpcode());
   RValue Val;
   return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
 }
 
-LValue CodeGenFunction::
-EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
-                                    llvm::Value *&Result) {
+LValue CodeGenFunction::EmitScalarCompoundAssignWithComplex(
+    const CompoundAssignOperator *E, llvm::Value *&Result) {
   // Key Instructions: Don't need to create an atom group here; one will already
   // be active through scalar handling code.
   CompoundFunc Op = getComplexOp(E->getOpcode());
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index a8e434b903763..31cc0d8b8de56 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -293,27 +293,19 @@ void foo7() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_PLUS:.*]] = cir.unary(plus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
-// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_PLUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+// CIR-BEFORE-NOT: cir.unary
+// CIR-BEFORE: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_PLUS:.*]] = cir.unary(plus, %[[REAL]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[IMAG_PLUS:.*]] = cir.unary(plus, %[[IMAG]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_PLUS]], %[[IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
-// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+// CIR-AFTER-NOT: cir.unary
+// CIR-AFTER: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
-// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
-// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
-// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL]], 0
-// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG]], 1
-// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4
+// LLVM: store { float, float } %[[TMP]], ptr %[[B_ADDR]], align 4
 
 // OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
@@ -381,8 +373,8 @@ void foo9() {
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
 // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
 // CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.complex<!cir.float>
-// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
+// CIR-BEFORE-NOT: cir.unary
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
 // CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
@@ -393,13 +385,9 @@ void foo9() {
 // CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float
 // CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float
 // CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-NOT: cir.unary
 // CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16
 // CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16
 // CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
@@ -412,10 +400,6 @@ void foo9() {
 // LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
 // LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
 // LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
-// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
-// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
-// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
-// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
 // LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
 // LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
 // LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index 48b7dd72ae661..d54fcb1c3ccf4 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -1034,21 +1034,13 @@ void real_on_non_glvalue() {
 // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
 // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_PLUS:.*]] = cir.unary(plus, %[[A_REAL]]) : !cir.float, !cir.float
-// CIR: %[[A_IMAG_PLUS:.*]] = cir.unary(plus, %[[A_IMAG]]) : !cir.float, !cir.float
-// CIR: %[[RESULT:.*]] = cir.complex.create %[[A_REAL_PLUS]], %[[A_IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
 // CIR: cir.store{{.*}} %[[RESULT_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
 // LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
-// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
-// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL]], 0
-// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1
 // LLVM: store float %[[A_REAL]], ptr %[[B_ADDR]], align 4
 
 // OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
@@ -1067,21 +1059,13 @@ void imag_on_non_glvalue() {
 // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
 // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_REAL_PLUS:.*]] = cir.unary(plus, %[[A_REAL]]) : !cir.float, !cir.float
-// CIR: %[[A_IMAG_PLUS:.*]] = cir.unary(plus, %[[A_IMAG]]) : !cir.float, !cir.float
-// CIR: %[[RESULT:.*]] = cir.complex.create %[[A_REAL_PLUS]], %[[A_IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[RESULT_IMAG:.*]] = cir.complex.imag %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
 // CIR: cir.store{{.*}} %[[RESULT_IMAG]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
 // LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
-// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
-// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL]], 0
-// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1
 // LLVM: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4
 
 // OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
diff --git a/clang/test/CIR/CodeGen/coro-task.cpp b/clang/test/CIR/CodeGen/coro-task.cpp
index 637b058443bc7..b52f0f1871079 100644
--- a/clang/test/CIR/CodeGen/coro-task.cpp
+++ b/clang/test/CIR/CodeGen/coro-task.cpp
@@ -569,8 +569,7 @@ folly::coro::Task<int> go4() {
 
 // Get the lambda invoker ptr via `lambda operator folly::coro::Task<int> (*)(int const&)()`
 // CIR: %[[INVOKER:.*]] = cir.call @_ZZ3go4vENK3$_0cvPFN5folly4coro4TaskIiEERKiEEv(%{{.*}}) nothrow : {{.*}} -> (!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>> {llvm.noundef})
-// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[INVOKER]]) : !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>, !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>
-// CIR: cir.store{{.*}} %[[PLUS]], %[[FN_ADDR:.*]] : !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>, !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>>
+// CIR: cir.store{{.*}} %[[INVOKER]], %[[FN_ADDR:.*]] : !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>, !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>>
 // CIR: cir.scope {
 // CIR:   %[[ARG:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["ref.tmp2", init] {alignment = 4 : i64}
 // CIR:   %[[FN:.*]] = cir.load{{.*}} %[[FN_ADDR]] : !cir.ptr<!cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>>, !cir.ptr<!cir.func<(!cir.ptr<!s32i>) -> ![[IntTask]]>>
diff --git a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
index 2b00feccbc79a..25c17d6b37dde 100644
--- a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
+++ b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
@@ -121,10 +121,9 @@ int g3() {
 
 // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`.
 // CIR:   %[[OPERATOR_RESULT:.*]] = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[LAM_ALLOCA]]){{.*}}
-// CIR:   %[[PLUS:.*]] = cir.unary(plus, %[[OPERATOR_RESULT]])
 
 // 2. Load ptr to `__invoke()`.
-// CIR:   cir.store{{.*}} %[[PLUS]], %[[FN_ADDR]]
+// CIR:   cir.store{{.*}} %[[OPERATOR_RESULT]], %[[FN_ADDR]]
 // CIR:   %[[FN:.*]] = cir.load{{.*}} %[[FN_ADDR]]
 // CIR:   %[[THREE:.*]] = cir.const #cir.int<3> : !s32i
 // CIR:   cir.store{{.*}} %[[THREE]], %[[REF_TMP1]]
diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp
index cc6f2ade38f3f..7d4c8f27e4545 100644
--- a/clang/test/CIR/CodeGen/unary.cpp
+++ b/clang/test/CIR/CodeGen/unary.cpp
@@ -13,7 +13,7 @@ unsigned up0() {
 // CHECK: cir.func{{.*}} @_Z3up0v() -> (!u32i{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
+// CHECK-NOT: cir.unary
 
 // LLVM: define{{.*}} i32 @_Z3up0v()
 // LLVM:   %[[RV:.*]] = alloca i32, i64 1, align 4
@@ -231,7 +231,7 @@ float fpPlus() {
 // CHECK: cir.func{{.*}} @_Z6fpPlusv() -> (!cir.float{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
+// CHECK-NOT: cir.unary
 
 // LLVM: define{{.*}} float @_Z6fpPlusv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -411,7 +411,7 @@ void chars(char c) {
 
   int c1 = +c;
   // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
-  // CHECK: cir.unary(plus, %[[PROMO]]) : !s32i, !s32i
+  // CHECK-NOT: cir.unary
   int c2 = -c;
   // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
   // CHECK: cir.unary(minus, %[[PROMO]]) nsw : !s32i, !s32i
@@ -432,8 +432,8 @@ _Float16 fp16UPlus(_Float16 f) {
 // CHECK: cir.func{{.*}} @_Z9fp16UPlusDF16_({{.*}}) -> (!cir.f16{{.*}})
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]]
 // CHECK:   %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float
-// CHECK:   %[[RESULT:.*]] = cir.unary(plus, %[[PROMOTED]])
-// CHECK:   %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16
+// CHECK-NOT: cir.unary
+// CHECK:   %[[UNPROMOTED:.*]] = cir.cast floating %[[PROMOTED]] : !cir.float -> !cir.f16
 
 // LLVM: define{{.*}} half @_Z9fp16UPlusDF16_({{.*}})
 // LLVM:   %[[F_LOAD:.*]] = load half, ptr %{{.*}}, align 2
@@ -567,9 +567,8 @@ void f16NestedUPlus() {
 // CHECK:  %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
 // CHECK:  %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
 // CHECK:  %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
-// CHECK:  %[[A_PLUS:.*]] = cir.unary(plus, %[[A_F32]]) : !cir.float, !cir.float
-// CHECK:  %[[RESULT_F32:.*]] = cir.unary(plus, %[[A_PLUS]]) : !cir.float, !cir.float
-// CHECK:  %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16
+// CHECK-NOT: cir.unary
+// CHECK:  %[[RESULT:.*]] = cir.cast floating %[[A_F32]] : !cir.float -> !cir.f16
 // CHECK:  cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
 
 // LLVM: define{{.*}} void @_Z14f16NestedUPlusv()
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index e7d6b4974c6ae..a32bc94bae103 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -332,8 +332,7 @@ void foo8() {
 // CIR-SAME: #cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP1:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[TMP1]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
-// CIR: cir.store{{.*}} %[[PLUS]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: cir.store{{.*}} %[[TMP1]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP2:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index 8a804ad6fa2f1..26b4e4a8f7f8a 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -319,8 +319,7 @@ void foo8() {
 // CIR-SAME: #cir.int<3> : !s32i, #cir.int<4> : !s32i]> : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[VEC_VAL]], %[[VEC]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP1:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[TMP1]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
-// CIR: cir.store{{.*}} %[[PLUS]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
+// CIR: cir.store{{.*}} %[[TMP1]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP2:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
diff --git a/clang/test/CIR/IR/unary.cir b/clang/test/CIR/IR/unary.cir
index d01d4eb3c920a..0fdf1dcfdfb47 100644
--- a/clang/test/CIR/IR/unary.cir
+++ b/clang/test/CIR/IR/unary.cir
@@ -9,42 +9,38 @@ module {
   cir.func @test_unary_unsigned() {
     %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a"] {alignment = 4 : i64}
     %1 = cir.load %0 : !cir.ptr<!u32i>, !u32i
-    %2 = cir.unary(plus, %1) : !u32i, !u32i
-    %3 = cir.unary(minus, %1) : !u32i, !u32i
-    %4 = cir.unary(not, %1) : !u32i, !u32i
-    %5 = cir.unary(inc, %1) : !u32i, !u32i
-    %6 = cir.unary(dec, %1) : !u32i, !u32i
+    %2 = cir.unary(minus, %1) : !u32i, !u32i
+    %3 = cir.unary(not, %1) : !u32i, !u32i
+    %4 = cir.unary(inc, %1) : !u32i, !u32i
+    %5 = cir.unary(dec, %1) : !u32i, !u32i
     cir.return
   }
 // CHECK: cir.func{{.*}} @test_unary_unsigned() {
 // CHECK:   %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a"] {alignment = 4 : i64}
 // CHECK:   %1 = cir.load %0 : !cir.ptr<!u32i>, !u32i
-// CHECK:   %2 = cir.unary(plus, %1) : !u32i, !u32i
-// CHECK:   %3 = cir.unary(minus, %1) : !u32i, !u32i
-// CHECK:   %4 = cir.unary(not, %1) : !u32i, !u32i
-// CHECK:   %5 = cir.unary(inc, %1) : !u32i, !u32i
-// CHECK:   %6 = cir.unary(dec, %1) : !u32i, !u32i
+// CHECK:   %2 = cir.unary(minus, %1) : !u32i, !u32i
+// CHECK:   %3 = cir.unary(not, %1) : !u32i, !u32i
+// CHECK:   %4 = cir.unary(inc, %1) : !u32i, !u32i
+// CHECK:   %5 = cir.unary(dec, %1) : !u32i, !u32i
 // CHECK:   cir.return
 // CHECK: }
 
   cir.func @test_unary_signed() {
     %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"] {alignment = 4 : i64}
     %1 = cir.load %0 : !cir.ptr<!s32i>, !s32i
-    %2 = cir.unary(plus, %1) : !s32i, !s32i
-    %3 = cir.unary(minus, %1) nsw : !s32i, !s32i
-    %4 = cir.unary(not, %1) : !s32i, !s32i
-    %5 = cir.unary(inc, %1) nsw : !s32i, !s32i
-    %6 = cir.unary(dec, %1) nsw : !s32i, !s32i
+    %2 = cir.unary(minus, %1) nsw : !s32i, !s32i
+    %3 = cir.unary(not, %1) : !s32i, !s32i
+    %4 = cir.unary(inc, %1) nsw : !s32i, !s32i
+    %5 = cir.unary(dec, %1) nsw : !s32i, !s32i
     cir.return
   }
 // CHECK: cir.func{{.*}} @test_unary_signed() {
 // CHECK:   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"] {alignment = 4 : i64}
 // CHECK:   %1 = cir.load %0 : !cir.ptr<!s32i>, !s32i
-// CHECK:   %2 = cir.unary(plus, %1) : !s32i, !s32i
-// CHECK:   %3 = cir.unary(minus, %1) nsw : !s32i, !s32i
-// CHECK:   %4 = cir.unary(not, %1) : !s32i, !s32i
-// CHECK:   %5 = cir.unary(inc, %1) nsw : !s32i, !s32i
-// CHECK:   %6 = cir.unary(dec, %1) nsw : !s32i, !s32i
+// CHECK:   %2 = cir.unary(minus, %1) nsw : !s32i, !s32i
+// CHECK:   %3 = cir.unary(not, %1) : !s32i, !s32i
+// CHECK:   %4 = cir.unary(inc, %1) nsw : !s32i, !s32i
+// CHECK:   %5 = cir.unary(dec, %1) nsw : !s32i, !s32i
 // CHECK:   cir.return
 // CHECK: }
 }
diff --git a/clang/test/CIR/Transforms/canonicalize.cir b/clang/test/CIR/Transforms/canonicalize.cir
index cfac73ecdb738..7476126818082 100644
--- a/clang/test/CIR/Transforms/canonicalize.cir
+++ b/clang/test/CIR/Transforms/canonicalize.cir
@@ -117,78 +117,6 @@ module {
   // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.int<4294967294> : !u32i
   // CHECK-NEXT:   cir.return %[[CONST]] : !u32i
 
-  cir.func @unary_plus_true() -> !cir.bool {
-    %0 = cir.const #true
-    %1 = cir.unary(plus, %0) : !cir.bool, !cir.bool
-    cir.return %1 : !cir.bool
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_true() -> !cir.bool
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #true
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.bool
-
-  cir.func @unary_plus_false() -> !cir.bool {
-    %0 = cir.const #false
-    %1 = cir.unary(plus, %0) : !cir.bool, !cir.bool
-    cir.return %1 : !cir.bool
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_false() -> !cir.bool
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #false
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.bool
-
-  cir.func @unary_plus_int() -> !s32i {
-    %0 = cir.const #cir.int<1> : !s32i
-    %1 = cir.unary(plus, %0) : !s32i, !s32i
-    cir.return %1 : !s32i
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_int() -> !s32i
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.int<1> : !s32i
-  // CHECK-NEXT:   cir.return %[[CONST]] : !s32i
-
-  cir.func @unary_plus_uint() -> !u32i {
-    %0 = cir.const #cir.int<1> : !u32i
-    %1 = cir.unary(plus, %0) : !u32i, !u32i
-    cir.return %1 : !u32i
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_uint() -> !u32i
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.int<1> : !u32i
-  // CHECK-NEXT:   cir.return %[[CONST]] : !u32i
-
-  cir.func @unary_plus_float() -> !cir.float {
-    %0 = cir.const #cir.fp<1.100000e+00> : !cir.float
-    %1 = cir.unary(plus, %0) : !cir.float, !cir.float
-    cir.return %1 : !cir.float
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_float() -> !cir.float
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.fp<1.100000e+00> : !cir.float
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.float
-
-  cir.func @unary_plus_double() -> !cir.double {
-    %0 = cir.const #cir.fp<1.100000e+00> : !cir.double
-    %1 = cir.unary(plus, %0) : !cir.double, !cir.double
-    cir.return %1 : !cir.double
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_double() -> !cir.double
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.fp<1.100000e+00> : !cir.double
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.double
-
-  cir.func @unary_plus_nan() -> !cir.float {
-    %0 = cir.const #cir.fp<0x7F800000> : !cir.float
-    %1 = cir.unary(plus, %0) : !cir.float, !cir.float
-    cir.return %1 : !cir.float
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_nan() -> !cir.float
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.fp<0x7F800000> : !cir.float
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.float
-
-  cir.func @unary_plus_neg_nan() -> !cir.float {
-    %0 = cir.const #cir.fp<0xFF800000> : !cir.float
-    %1 = cir.unary(plus, %0) : !cir.float, !cir.float
-    cir.return %1 : !cir.float
-  }
-  // CHECK:      cir.func{{.*}} @unary_plus_neg_nan() -> !cir.float
-  // CHECK-NEXT:   %[[CONST:.*]] = cir.const #cir.fp<0xFF800000> : !cir.float
-  // CHECK-NEXT:   cir.return %[[CONST]] : !cir.float
-
   cir.func @unary_minus_true() -> !cir.bool {
     %0 = cir.const #true
     %1 = cir.unary(minus, %0) : !cir.bool, !cir.bool

>From 12564d97270f0db1e6f4e63f0f440f5d59d1ca73 Mon Sep 17 00:00:00 2001
From: xlauko <xlauko at mail.muni.cz>
Date: Sat, 7 Mar 2026 12:15:01 +0100
Subject: [PATCH 33/33] [CIR] Split CIR_UnaryOp into individual operations

Split the monolithic cir.unary operation (which dispatched on a
UnaryOpKind enum) into four separate operations: cir.inc, cir.dec,
cir.minus, and cir.not.

This follows the same pattern used when cir.binop was split into
individual binary operations (AddOp, SubOp, etc.).

Changes:
- Add CIR_UnaryOpInterface with getInput()/getResult() methods
- Add CIR_UnaryOp and CIR_UnaryOpWithOverflowFlag base classes
- Define IncOp, DecOp, MinusOp, NotOp with per-op folds
- Add Involution trait to NotOp for not(not(x)) -> x folding
- Replace createUnaryOp() with createInc/Dec/Minus/Not builders
- Split LLVM lowering into four separate patterns
- Split LoweringPrepare complex-type handling per unary op
- Update CIRCanonicalize and CIRSimplify for new op types
- Update all codegen files to use bool params instead of UnaryOpKind
- Remove CIR_UnaryOpKind enum and old CIR_UnaryOp definition
- Update 82 test files for new assembly format

Assembly format change:
  cir.unary(inc, %x) nsw : !s32i, !s32i  ->  cir.inc nsw %x : !s32i
  cir.unary(not, %x) : !u32i, !u32i      ->  cir.not %x : !u32i
---
 .../CIR/Dialect/Builder/CIRBaseBuilder.h      |  23 ++-
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 139 ++++++++++---
 .../clang/CIR/Interfaces/CIROpInterfaces.td   |  21 ++
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |   6 +-
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp       |   6 +-
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          |  13 +-
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp   |  28 +--
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    |  71 +++----
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |   6 +-
 clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp |  11 +-
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp       | 144 +++++--------
 .../Dialect/Transforms/CIRCanonicalize.cpp    |  12 +-
 .../CIR/Dialect/Transforms/CIRSimplify.cpp    |   7 +-
 .../Dialect/Transforms/LoweringPrepare.cpp    |  59 +++---
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 194 ++++++++----------
 clang/test/CIR/CodeGen/atomic.c               |  28 +--
 clang/test/CIR/CodeGen/bitfields.c            |   2 +-
 clang/test/CIR/CodeGen/complex-builtins.cpp   |   2 +-
 clang/test/CIR/CodeGen/complex-unary.cpp      |  48 ++---
 .../CodeGen/cxx-rewritten-binary-operator.cpp |   2 +-
 clang/test/CIR/CodeGen/dynamic-cast-exact.cpp |   2 +-
 clang/test/CIR/CodeGen/fold-during-cg.c       |  70 +++----
 clang/test/CIR/CodeGen/integer-overflow.c     |  28 +--
 clang/test/CIR/CodeGen/label.c                |   2 +-
 .../CIR/CodeGen/lambda-static-invoker.cpp     |   1 -
 clang/test/CIR/CodeGen/nrvo.cpp               |   2 +-
 clang/test/CIR/CodeGen/pointers.cpp           |   2 +-
 clang/test/CIR/CodeGen/static-vars.c          |   2 +-
 clang/test/CIR/CodeGen/static-vars.cpp        |   2 +-
 clang/test/CIR/CodeGen/try-catch.cpp          |   4 +-
 clang/test/CIR/CodeGen/unary.cpp              |  57 +++--
 clang/test/CIR/CodeGen/vector-ext.cpp         |   4 +-
 clang/test/CIR/CodeGen/vector.cpp             |   4 +-
 clang/test/CIR/CodeGen/vla.c                  |   2 +-
 .../CodeGenBuiltins/X86/avx512bw-builtins.c   |  12 +-
 .../CodeGenBuiltins/X86/avx512dq-builtins.c   |   6 +-
 .../CodeGenBuiltins/X86/avx512f-builtins.c    |   6 +-
 .../CIR/CodeGenBuiltins/builtin-fcmp-sse.c    |   8 +-
 .../builtin-types-compatible.c                |   2 +-
 .../CIR/CodeGenOpenACC/atomic-capture.cpp     |  24 +--
 .../test/CIR/CodeGenOpenACC/atomic-update.cpp |   6 +-
 .../combined-firstprivate-clause.cpp          |  14 +-
 .../combined-private-clause.cpp               |   4 +-
 .../combined-reduction-clause-default-ops.cpp |  54 ++---
 .../combined-reduction-clause-float.cpp       |  36 ++--
 .../combined-reduction-clause-inline-ops.cpp  |  72 +++----
 .../combined-reduction-clause-int.cpp         |  54 ++---
 .../combined-reduction-clause-outline-ops.cpp |  72 +++----
 .../compute-firstprivate-clause.c             |   6 +-
 .../compute-firstprivate-clause.cpp           |  14 +-
 .../CodeGenOpenACC/compute-private-clause.cpp |   4 +-
 .../compute-reduction-clause-default-ops.c    |  54 ++---
 .../compute-reduction-clause-default-ops.cpp  |  54 ++---
 .../compute-reduction-clause-float.c          |  36 ++--
 .../compute-reduction-clause-float.cpp        |  36 ++--
 .../compute-reduction-clause-inline-ops.cpp   |  72 +++----
 .../compute-reduction-clause-int.c            |  54 ++---
 .../compute-reduction-clause-int.cpp          |  54 ++---
 .../compute-reduction-clause-outline-ops.cpp  |  72 +++----
 .../compute-reduction-clause-unsigned-int.c   |  54 ++---
 clang/test/CIR/CodeGenOpenACC/data.c          |   4 +-
 .../firstprivate-clause-recipes.cpp           |  46 ++---
 .../CodeGenOpenACC/loop-private-clause.cpp    |   4 +-
 .../loop-reduction-clause-default-ops.cpp     |  54 ++---
 .../loop-reduction-clause-float.cpp           |  36 ++--
 .../loop-reduction-clause-inline-ops.cpp      |  72 +++----
 .../loop-reduction-clause-int.cpp             |  54 ++---
 .../loop-reduction-clause-outline-ops.cpp     |  72 +++----
 .../private-clause-array-recipes-CtorDtor.cpp |  32 +--
 .../private-clause-array-recipes-NoOps.cpp    |  16 +-
 ...-clause-pointer-array-recipes-CtorDtor.cpp | 124 +++++------
 ...ate-clause-pointer-array-recipes-NoOps.cpp |  82 ++++----
 ...ivate-clause-pointer-array-recipes-int.cpp |  40 ++--
 ...rivate-clause-pointer-recipes-CtorDtor.cpp |  44 ++--
 .../private-clause-pointer-recipes-NoOps.cpp  |  32 +--
 .../private-clause-pointer-recipes-int.cpp    |  20 +-
 .../reduction-clause-recipes.cpp              |  64 +++---
 clang/test/CIR/IR/unary.cir                   |  32 +--
 clang/test/CIR/Transforms/canonicalize.cir    |  30 +--
 .../Transforms/flatten-cleanup-scope-eh.cir   |   6 +-
 .../flatten-cleanup-scope-multi-exit.cir      |   4 +-
 clang/test/CIR/Transforms/select.cir          |   2 +-
 82 files changed, 1390 insertions(+), 1364 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index f51bea894d2ae..88d7042410b45 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -231,9 +231,12 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     return createLoad(loc, ptr, /*isVolatile=*/false, alignment);
   }
 
+  mlir::Value createNot(mlir::Location loc, mlir::Value value) {
+    return cir::NotOp::create(*this, loc, value);
+  }
+
   mlir::Value createNot(mlir::Value value) {
-    return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
-                                cir::UnaryOpKind::Not, value);
+    return createNot(value.getLoc(), value);
   }
 
   /// Create a do-while operation.
@@ -272,9 +275,19 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     return cir::ContinueOp::create(*this, loc);
   }
 
-  mlir::Value createUnaryOp(mlir::Location loc, cir::UnaryOpKind kind,
-                            mlir::Value operand) {
-    return cir::UnaryOp::create(*this, loc, kind, operand);
+  mlir::Value createInc(mlir::Location loc, mlir::Value input,
+                        bool nsw = false) {
+    return cir::IncOp::create(*this, loc, input, nsw);
+  }
+
+  mlir::Value createDec(mlir::Location loc, mlir::Value input,
+                        bool nsw = false) {
+    return cir::DecOp::create(*this, loc, input, nsw);
+  }
+
+  mlir::Value createMinus(mlir::Location loc, mlir::Value input,
+                          bool nsw = false) {
+    return cir::MinusOp::create(*this, loc, input, nsw);
   }
 
   mlir::TypedAttr getConstPtrAttr(mlir::Type type, int64_t value) {
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 5d21a622d5e29..2354591ba3c86 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1703,49 +1703,132 @@ def CIR_LabelOp : CIR_Op<"label", [AlwaysSpeculatable]> {
 }
 
 //===----------------------------------------------------------------------===//
-// UnaryOp
+// Unary Operations
 //===----------------------------------------------------------------------===//
 
-def CIR_UnaryOpKind : CIR_I32EnumAttr<"UnaryOpKind", "unary operation kind", [
-  I32EnumAttrCase<"Inc",   0, "inc">,
-  I32EnumAttrCase<"Dec",   1, "dec">,
-  I32EnumAttrCase<"Minus", 2, "minus">,
-  I32EnumAttrCase<"Not",   3, "not">
-]>;
+// Base class for all CIR unary operations.
+// `type` constrains the input/result type.
+class CIR_UnaryOp<string mnemonic, Type type, list<Trait> traits = []>
+    : CIR_Op<mnemonic, !listconcat([
+        Pure, SameOperandsAndResultType,
+        DeclareOpInterfaceMethods<CIR_UnaryOpInterface>
+      ], traits)>
+{
+  let arguments = (ins type:$input);
+
+  let results = (outs type:$result);
+
+  let assemblyFormat = [{
+    $input `:` type($input) attr-dict
+  }];
+}
+
+// Base class for unary ops that support the nsw (no signed wrap) flag.
+class CIR_UnaryOpWithOverflowFlag<string mnemonic, Type type,
+                                   list<Trait> traits = []>
+    : CIR_UnaryOp<mnemonic, type, traits>
+{
+  let arguments = (ins type:$input, UnitProp:$no_signed_wrap);
+
+  let assemblyFormat = [{
+    (`nsw` $no_signed_wrap^)?
+    $input `:` type($input) attr-dict
+  }];
+}
 
-def CIR_UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> {
-  let summary = "Unary operations";
+//===----------------------------------------------------------------------===//
+// IncOp
+//===----------------------------------------------------------------------===//
+
+def CIR_IncOp : CIR_UnaryOpWithOverflowFlag<"inc", CIR_AnyType> {
+  let summary = "Increment by one";
   let description = [{
-    `cir.unary` performs the unary operation according to
-    the specified opcode kind: [inc, dec, minus, not].
+    The `cir.inc` operation increments the operand by one. The operand and
+    result must have the same type.
 
-    It requires one input operand and has one result, both types
-    should be the same.
+    The optional `nsw` (no signed wrap) attribute indicates that the result
+    is poison if signed overflow occurs.
 
-    If the `nsw` (no signed wrap) attribute is present, the result is poison if
-    signed overflow occurs.
+    Example:
 
     ```mlir
-    %7 = cir.unary(inc, %1) : i32 -> i32
-    %8 = cir.unary(dec, %2) nsw : i32 -> i32
+    %1 = cir.inc %0 : !s32i
+    %3 = cir.inc nsw %2 : !s32i
     ```
   }];
 
-  let arguments = (ins
-    Arg<CIR_UnaryOpKind, "unary op kind">:$kind,
-    Arg<CIR_AnyType>:$input,
-    UnitAttr:$no_signed_wrap
-  );
+  let hasFolder = 1;
+}
 
-  let results = (outs CIR_AnyType:$result);
+//===----------------------------------------------------------------------===//
+// DecOp
+//===----------------------------------------------------------------------===//
 
-  let assemblyFormat = [{
-      `(` $kind `,` $input `)`
-      (`nsw` $no_signed_wrap^)?
-      `:` type($input) `,` type($result) attr-dict
+def CIR_DecOp : CIR_UnaryOpWithOverflowFlag<"dec", CIR_AnyType> {
+  let summary = "Decrement by one";
+  let description = [{
+    The `cir.dec` operation decrements the operand by one. The operand and
+    result must have the same type.
+
+    The optional `nsw` (no signed wrap) attribute indicates that the result
+    is poison if signed overflow occurs.
+
+    Example:
+
+    ```mlir
+    %1 = cir.dec %0 : !s32i
+    %3 = cir.dec nsw %2 : !s32i
+    ```
+  }];
+
+  let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// MinusOp
+//===----------------------------------------------------------------------===//
+
+def CIR_MinusOp : CIR_UnaryOpWithOverflowFlag<"minus", CIR_AnyType> {
+  let summary = "Unary minus (negation)";
+  let description = [{
+    The `cir.minus` operation negates the operand. The operand and result
+    must have the same type.
+
+    The optional `nsw` (no signed wrap) attribute indicates that the result
+    is poison if signed overflow occurs (e.g. negating the minimum signed
+    integer).
+
+    Example:
+
+    ```mlir
+    %1 = cir.minus %0 : !s32i
+    %3 = cir.minus nsw %2 : !s32i
+    %5 = cir.minus %4 : !cir.float
+    ```
+  }];
+
+  let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// NotOp
+//===----------------------------------------------------------------------===//
+
+def CIR_NotOp : CIR_UnaryOp<"not", CIR_AnyType, [Involution]> {
+  let summary = "Bitwise NOT / logical NOT";
+  let description = [{
+    The `cir.not` operation performs a bitwise NOT on integer types or a
+    logical NOT on boolean types. The operand and result must have the same
+    type.
+
+    Example:
+
+    ```mlir
+    %1 = cir.not %0 : !s32i
+    %3 = cir.not %2 : !cir.bool
+    ```
   }];
 
-  let hasVerifier = 1;
   let hasFolder = 1;
 }
 
diff --git a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td
index c715e2e03a7b9..41885d4df8f20 100644
--- a/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td
+++ b/clang/include/clang/CIR/Interfaces/CIROpInterfaces.td
@@ -189,6 +189,27 @@ let cppNamespace = "::cir" in {
     ];
   }
 
+  def CIR_UnaryOpInterface : OpInterface<"UnaryOpInterface"> {
+    let description = [{
+      Common interface for CIR unary operations.
+      Provides uniform access to the input operand and result of any unary
+      operation in the CIR dialect.
+    }];
+
+    let methods = [
+      InterfaceMethod<"Return the input operand.",
+        "mlir::Value", "getInput", (ins), [{}],
+        /*defaultImplementation=*/[{
+          return $_op.getInput();
+        }]>,
+      InterfaceMethod<"Return the result value.",
+        "mlir::Value", "getResult", (ins), [{}],
+        /*defaultImplementation=*/[{
+          return $_op.getResult();
+        }]>,
+    ];
+  }
+
 } // namespace cir
 
 #endif // CLANG_CIR_INTERFACES_CIROPINTERFACES_TD
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index e2f89cc5aa12f..be21e3f29f879 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -429,8 +429,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
       // Source is a unsigned integer: first cast it to signed.
       if (intTy.isUnsigned())
         value = createIntCast(value, getSIntNTy(intTy.getWidth()));
-      return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
-                                  cir::UnaryOpKind::Minus, value);
+      return createMinus(value.getLoc(), value);
     }
 
     llvm_unreachable("negation for the given type is NYI");
@@ -444,8 +443,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     assert(!cir::MissingFeatures::fpConstraints());
     assert(!cir::MissingFeatures::fastMathFlags());
 
-    return cir::UnaryOp::create(*this, value.getLoc(), value.getType(),
-                                cir::UnaryOpKind::Minus, value);
+    return createMinus(value.getLoc(), value);
   }
 
   //===--------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 244979aac917e..1b59dc16cafb6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -182,8 +182,7 @@ static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
   result = BinOp::create(builder, result.getLoc(), result, emittedArgValue);
 
   if (invert)
-    result = cir::UnaryOp::create(builder, result.getLoc(),
-                                  cir::UnaryOpKind::Not, result);
+    result = builder.createNot(result);
 
   result = emitFromInt(cgf, result, typ, originalArgType);
   return RValue::get(result);
@@ -1085,8 +1084,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
   case Builtin::BIconjf:
   case Builtin::BIconjl: {
     mlir::Value complex = emitComplexExpr(e->getArg(0));
-    mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
-                                             cir::UnaryOpKind::Not, complex);
+    mlir::Value conj = builder.createNot(complex);
     return RValue::getComplex(conj);
   }
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 51dc297e86d01..3d6e32085cc83 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -218,7 +218,7 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
 
   // Unary &
   if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
-    // TODO(cir): maybe we should use cir.unary for pointers here instead.
+    // TODO(cir): maybe we should use a CIR unary op for pointers here instead.
     if (uo->getOpcode() == UO_AddrOf) {
       LValue lv = emitLValue(uo->getSubExpr());
       if (baseInfo)
@@ -1052,17 +1052,14 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
   }
   case UO_PreInc:
   case UO_PreDec: {
-    cir::UnaryOpKind kind =
-        e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
     LValue lv = emitLValue(e->getSubExpr());
 
     assert(e->isPrefix() && "Prefix operator in unexpected state!");
 
-    if (e->getType()->isAnyComplexType()) {
-      emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
-    } else {
-      emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
-    }
+    if (e->getType()->isAnyComplexType())
+      emitComplexPrePostIncDec(e, lv);
+    else
+      emitScalarPrePostIncDec(e, lv);
 
     return lv;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 1d05f03491d40..7d302030ccf50 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -173,22 +173,21 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   mlir::Value VisitStmtExpr(const StmtExpr *e);
 
   // Operators.
-  mlir::Value VisitPrePostIncDec(const UnaryOperator *e, cir::UnaryOpKind op,
-                                 bool isPre) {
+  mlir::Value VisitPrePostIncDec(const UnaryOperator *e) {
     LValue lv = cgf.emitLValue(e->getSubExpr());
-    return cgf.emitComplexPrePostIncDec(e, lv, op, isPre);
+    return cgf.emitComplexPrePostIncDec(e, lv);
   }
   mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
-    return VisitPrePostIncDec(e, cir::UnaryOpKind::Dec, false);
+    return VisitPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
-    return VisitPrePostIncDec(e, cir::UnaryOpKind::Inc, false);
+    return VisitPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
-    return VisitPrePostIncDec(e, cir::UnaryOpKind::Dec, true);
+    return VisitPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
-    return VisitPrePostIncDec(e, cir::UnaryOpKind::Inc, true);
+    return VisitPrePostIncDec(e);
   }
   mlir::Value VisitUnaryDeref(const Expr *e) { return emitLoadOfLValue(e); }
 
@@ -602,8 +601,7 @@ mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e,
     op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
   else
     op = Visit(e->getSubExpr());
-  return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()),
-                               cir::UnaryOpKind::Minus, op);
+  return builder.createMinus(cgf.getLoc(e->getExprLoc()), op);
 }
 
 mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *e) {
@@ -1095,15 +1093,11 @@ LValue CIRGenFunction::emitComplexCompoundAssignmentLValue(
 }
 
 mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
-                                                     LValue lv,
-                                                     cir::UnaryOpKind op,
-                                                     bool isPre) {
-  assert((op == cir::UnaryOpKind::Inc || op == cir::UnaryOpKind::Dec) &&
-         "Invalid UnaryOp kind for ComplexType");
-
+                                                     LValue lv) {
   mlir::Value inVal = emitLoadOfComplex(lv, e->getExprLoc());
   mlir::Location loc = getLoc(e->getExprLoc());
-  mlir::Value incVal = builder.createUnaryOp(loc, op, inVal);
+  mlir::Value incVal = e->isIncrementOp() ? builder.createInc(loc, inVal)
+                                          : builder.createDec(loc, inVal);
 
   // Store the updated result through the lvalue.
   emitStoreOfComplex(loc, incVal, lv, /*isInit=*/false);
@@ -1113,7 +1107,7 @@ mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
 
   // If this is a postinc, return the value read from memory, otherwise use the
   // updated value.
-  return isPre ? incVal : inVal;
+  return e->isPrefix() ? incVal : inVal;
 }
 
 LValue CIRGenFunction::emitScalarCompoundAssignWithComplex(
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 29aa35e762f45..711d921e4c79f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -586,24 +586,23 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
 
   // Unary Operators.
-  mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
+  mlir::Value VisitUnaryPrePostIncDec(const UnaryOperator *e) {
     LValue lv = cgf.emitLValue(e->getSubExpr());
-    return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
+    return emitScalarPrePostIncDec(e, lv);
+  }
+  mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
+    return VisitUnaryPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
-    LValue lv = cgf.emitLValue(e->getSubExpr());
-    return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
+    return VisitUnaryPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
-    LValue lv = cgf.emitLValue(e->getSubExpr());
-    return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
+    return VisitUnaryPrePostIncDec(e);
   }
   mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
-    LValue lv = cgf.emitLValue(e->getSubExpr());
-    return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
+    return VisitUnaryPrePostIncDec(e);
   }
-  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
-                                      cir::UnaryOpKind kind, bool isPre) {
+  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv) {
     if (cgf.getLangOpts().OpenMP)
       cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
 
@@ -632,7 +631,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     //          -> bool = ((int)bool + 1 != 0)
     // An interesting aspect of this is that increment is always true.
     // Decrement does not have this property.
-    if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
+    if (e->isIncrementOp() && type->isBooleanType()) {
       value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
     } else if (type->isIntegerType()) {
       QualType promotedType;
@@ -663,12 +662,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
       assert(!cir::MissingFeatures::sanitizers());
       if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
-        value = emitIncDecConsiderOverflowBehavior(e, value, kind);
+        value = emitIncDecConsiderOverflowBehavior(e, value);
       } else {
-        cir::UnaryOpKind kind =
-            e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
-        value = emitUnaryOp(e, kind, input, /*nsw=*/false);
+        value = emitIncOrDec(e, input, /*nsw=*/false);
       }
     } else if (const PointerType *ptr = type->getAs<PointerType>()) {
       QualType type = ptr->getPointeeType();
@@ -680,7 +677,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
         // For everything else, we can just do a simple increment.
         mlir::Location loc = cgf.getLoc(e->getSourceRange());
         CIRGenBuilderTy &builder = cgf.getBuilder();
-        int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
+        int amount = e->isIncrementOp() ? 1 : -1;
         mlir::Value amt = builder.getSInt32(amount, loc);
         assert(!cir::MissingFeatures::sanitizers());
         value = builder.createPtrStride(loc, value, amt);
@@ -700,10 +697,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
       if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
         // Create the inc/dec operation.
         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
-        assert(
-            (kind == cir::UnaryOpKind::Inc || kind == cir::UnaryOpKind::Dec) &&
-            "Invalid UnaryOp kind");
-        value = emitUnaryOp(e, kind, value);
+        value = emitIncOrDec(e, value);
       } else {
         cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
         return {};
@@ -728,23 +722,20 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
     // If this is a postinc, return the value read from memory, otherwise use
     // the updated value.
-    return isPre ? value : input;
+    return e->isPrefix() ? value : input;
   }
 
   mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
-                                                 mlir::Value inVal,
-                                                 cir::UnaryOpKind kind) {
-    assert((kind == cir::UnaryOpKind::Inc || kind == cir::UnaryOpKind::Dec) &&
-           "Invalid UnaryOp kind");
+                                                 mlir::Value inVal) {
     switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
     case LangOptions::SOB_Defined:
-      return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
+      return emitIncOrDec(e, inVal, /*nsw=*/false);
     case LangOptions::SOB_Undefined:
       assert(!cir::MissingFeatures::sanitizers());
-      return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
+      return emitIncOrDec(e, inVal, /*nsw=*/true);
     case LangOptions::SOB_Trapping:
       if (!e->canOverflow())
-        return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
+        return emitIncOrDec(e, inVal, /*nsw=*/true);
       cgf.cgm.errorNYI(e->getSourceRange(), "inc/def overflow SOB_Trapping");
       return {};
     }
@@ -804,20 +795,23 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
     // NOTE: LLVM codegen will lower this directly to either a FNeg
     // or a Sub instruction.  In CIR this will be handled later in LowerToLLVM.
-    return emitUnaryOp(e, cir::UnaryOpKind::Minus, operand, nsw);
+    return builder.createOrFold<cir::MinusOp>(
+        cgf.getLoc(e->getSourceRange().getBegin()), operand, nsw);
   }
 
-  mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
-                          mlir::Value input, bool nsw = false) {
-    return builder.createOrFold<cir::UnaryOp>(
-        cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
-        input, nsw);
+  mlir::Value emitIncOrDec(const UnaryOperator *e, mlir::Value input,
+                           bool nsw = false) {
+    mlir::Location loc = cgf.getLoc(e->getSourceRange().getBegin());
+    return e->isIncrementOp()
+               ? builder.createOrFold<cir::IncOp>(loc, input, nsw)
+               : builder.createOrFold<cir::DecOp>(loc, input, nsw);
   }
 
   mlir::Value VisitUnaryNot(const UnaryOperator *e) {
     ignoreResultAssign = false;
     mlir::Value op = Visit(e->getSubExpr());
-    return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
+    return builder.createOrFold<cir::NotOp>(
+        cgf.getLoc(e->getSourceRange().getBegin()), op);
   }
 
   mlir::Value VisitUnaryLNot(const UnaryOperator *e);
@@ -2815,9 +2809,6 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
 }
 
 mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
-                                                    LValue lv,
-                                                    cir::UnaryOpKind kind,
-                                                    bool isPre) {
-  return ScalarExprEmitter(*this, builder)
-      .emitScalarPrePostIncDec(e, lv, kind, isPre);
+                                                    LValue lv) {
+  return ScalarExprEmitter(*this, builder).emitScalarPrePostIncDec(e, lv);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0e82958ef6f39..696b17d93061e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1660,8 +1660,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   mlir::Value emitScalarExpr(const clang::Expr *e,
                              bool ignoreResultAssign = false);
 
-  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
-                                      cir::UnaryOpKind kind, bool isPre);
+  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv);
 
   /// Build a debug stoppoint if we are emitting debug info.
   void emitStopPoint(const Stmt *s);
@@ -1692,8 +1691,7 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit);
 
-  mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv,
-                                       cir::UnaryOpKind op, bool isPre);
+  mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv);
 
   LValue emitComplexAssignmentLValue(const BinaryOperator *e);
   LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index 44f6bc845ec8f..eab045e186699 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -92,8 +92,7 @@ void OpenACCRecipeBuilderBase::makeAllocaCopy(mlir::Location loc,
         [&](mlir::OpBuilder &b, mlir::Location loc) {
           // Simple increment of the iterator.
           auto load = cir::LoadOp::create(builder, loc, {itr});
-          auto inc = cir::UnaryOp::create(builder, loc, load.getType(),
-                                          cir::UnaryOpKind::Inc, load);
+          auto inc = builder.createInc(loc, load);
           builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
           builder.createYield(loc);
         });
@@ -308,9 +307,8 @@ std::pair<mlir::Value, mlir::Value> OpenACCRecipeBuilderBase::createBoundsLoop(
         /*stepBuilder=*/
         [&](mlir::OpBuilder &b, mlir::Location loc) {
           auto load = cir::LoadOp::create(builder, loc, {itr});
-          auto unary = cir::UnaryOp::create(
-              builder, loc, load.getType(),
-              inverse ? cir::UnaryOpKind::Dec : cir::UnaryOpKind::Inc, load);
+          auto unary = inverse ? builder.createDec(loc, load)
+                               : builder.createInc(loc, load);
           builder.CIRBaseBuilderTy::createStore(loc, unary, itr);
           builder.createYield(loc);
         });
@@ -654,8 +652,7 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
         /*stepBuilder=*/
         [&](mlir::OpBuilder &b, mlir::Location loc) {
           auto loadItr = cir::LoadOp::create(builder, loc, {itr});
-          auto inc = cir::UnaryOp::create(builder, loc, loadItr.getType(),
-                                          cir::UnaryOpKind::Inc, loadItr);
+          auto inc = builder.createInc(loc, loadItr);
           builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
           builder.createYield(loc);
         }));
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 87cf3950b04d6..0b63c2b7450fb 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2664,106 +2664,74 @@ LogicalResult cir::LabelOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// UnaryOp
+// IncOp
 //===----------------------------------------------------------------------===//
 
-LogicalResult cir::UnaryOp::verify() {
-  switch (getKind()) {
-  case cir::UnaryOpKind::Inc:
-  case cir::UnaryOpKind::Dec:
-  case cir::UnaryOpKind::Minus:
-  case cir::UnaryOpKind::Not:
-    // Nothing to verify.
-    return success();
-  }
-
-  llvm_unreachable("Unknown UnaryOp kind?");
+OpFoldResult cir::IncOp::fold(FoldAdaptor adaptor) {
+  if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()))
+    return adaptor.getInput();
+  return {};
 }
 
-static bool isBoolNot(cir::UnaryOp op) {
-  return isa<cir::BoolType>(op.getInput().getType()) &&
-         op.getKind() == cir::UnaryOpKind::Not;
+//===----------------------------------------------------------------------===//
+// DecOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult cir::DecOp::fold(FoldAdaptor adaptor) {
+  if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()))
+    return adaptor.getInput();
+  return {};
 }
 
-// This folder simplifies the sequential boolean not operations.
-// For instance, the next two unary operations will be eliminated:
-//
-// ```mlir
-// %1 = cir.unary(not, %0) : !cir.bool, !cir.bool
-// %2 = cir.unary(not, %1) : !cir.bool, !cir.bool
-// ```
-//
-// and the argument of the first one (%0) will be used instead.
-OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
-  if (auto poison =
-          mlir::dyn_cast_if_present<cir::PoisonAttr>(adaptor.getInput())) {
-    // Propagate poison values
-    return poison;
-  }
+//===----------------------------------------------------------------------===//
+// MinusOp
+//===----------------------------------------------------------------------===//
 
-  if (isBoolNot(*this))
-    if (auto previous = getInput().getDefiningOp<cir::UnaryOp>())
-      if (isBoolNot(previous))
-        return previous.getInput();
+OpFoldResult cir::MinusOp::fold(FoldAdaptor adaptor) {
+  if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()))
+    return adaptor.getInput();
 
-  // Avoid introducing unnecessary duplicate constants in cases where we are
-  // just folding the operation to its input value. If we return the
-  // input attribute from the adapter, a new constant is materialized, but
-  // if we return the input value directly, it avoids that.
-  if (auto srcConst = getInput().getDefiningOp<cir::ConstantOp>()) {
-    if (mlir::isa<cir::BoolType>(srcConst.getType()) &&
-        getKind() == cir::UnaryOpKind::Minus)
+  // Avoid materializing a duplicate constant for bool minus (identity).
+  if (auto srcConst = getInput().getDefiningOp<cir::ConstantOp>())
+    if (mlir::isa<cir::BoolType>(srcConst.getType()))
       return srcConst.getResult();
+
+  // Fold with constant inputs.
+  if (mlir::Attribute attr = adaptor.getInput()) {
+    if (auto intAttr = mlir::dyn_cast<cir::IntAttr>(attr)) {
+      APInt val = intAttr.getValue();
+      val.negate();
+      return cir::IntAttr::get(getType(), val);
+    }
+    if (auto fpAttr = mlir::dyn_cast<cir::FPAttr>(attr)) {
+      APFloat val = fpAttr.getValue();
+      val.changeSign();
+      return cir::FPAttr::get(getType(), val);
+    }
   }
 
-  // Fold unary operations with constant inputs. If the input is a ConstantOp,
-  // it "folds" to its value attribute. If it was some other operation that
-  // was folded, it will be an mlir::Attribute that hasn't yet been
-  // materialized. If it was a value that couldn't be folded, it will be null.
+  return {};
+}
+
+//===----------------------------------------------------------------------===//
+// NotOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult cir::NotOp::fold(FoldAdaptor adaptor) {
+  if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()))
+    return adaptor.getInput();
+
+  // not(not(x)) -> x is handled by the Involution trait.
+
+  // Fold with constant inputs.
   if (mlir::Attribute attr = adaptor.getInput()) {
-    // For now, we only attempt to fold simple scalar values.
-    OpFoldResult result =
-        llvm::TypeSwitch<mlir::Attribute, OpFoldResult>(attr)
-            .Case<cir::IntAttr>([&](cir::IntAttr attrT) {
-              switch (getKind()) {
-              case cir::UnaryOpKind::Not: {
-                APInt val = attrT.getValue();
-                val.flipAllBits();
-                return cir::IntAttr::get(getType(), val);
-              }
-              case cir::UnaryOpKind::Minus: {
-                APInt val = attrT.getValue();
-                val.negate();
-                return cir::IntAttr::get(getType(), val);
-              }
-              default:
-                return cir::IntAttr{};
-              }
-            })
-            .Case<cir::FPAttr>([&](cir::FPAttr attrT) {
-              switch (getKind()) {
-              case cir::UnaryOpKind::Minus: {
-                APFloat val = attrT.getValue();
-                val.changeSign();
-                return cir::FPAttr::get(getType(), val);
-              }
-              default:
-                return cir::FPAttr{};
-              }
-            })
-            .Case<cir::BoolAttr>([&](cir::BoolAttr attrT) {
-              switch (getKind()) {
-              case cir::UnaryOpKind::Not:
-                return cir::BoolAttr::get(getContext(), !attrT.getValue());
-              case cir::UnaryOpKind::Minus:
-                return attrT;
-              default:
-                return cir::BoolAttr{};
-              }
-            })
-            .Default([&](auto attrT) { return mlir::Attribute{}; });
-    if (result)
-      return result;
+    if (auto intAttr = mlir::dyn_cast<cir::IntAttr>(attr)) {
+      APInt val = intAttr.getValue();
+      val.flipAllBits();
+      return cir::IntAttr::get(getType(), val);
+    }
+    if (auto boolAttr = mlir::dyn_cast<cir::BoolAttr>(attr))
+      return cir::BoolAttr::get(getContext(), !boolAttr.getValue());
   }
 
   return {};
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index 46a8c011b320b..0f1760f066a8e 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -70,12 +70,12 @@ void CIRCanonicalizePass::runOnOperation() {
 
     // Many operations are here to perform a manual `fold` in
     // applyOpPatternsGreedily.
-    if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp, AddOp,
-            MulOp, AndOp, OrOp, XorOp, MaxOp, ComplexCreateOp, ComplexImagOp,
-            ComplexRealOp, VecCmpOp, VecCreateOp, VecExtractOp, VecShuffleOp,
-            VecShuffleDynamicOp, VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp,
-            BitFfsOp, BitParityOp, BitPopcountOp, BitReverseOp, ByteSwapOp,
-            RotateOp, ConstantOp>(op))
+    if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, IncOp, DecOp,
+            MinusOp, NotOp, AddOp, MulOp, AndOp, OrOp, XorOp, MaxOp,
+            ComplexCreateOp, ComplexImagOp, ComplexRealOp, VecCmpOp,
+            VecCreateOp, VecExtractOp, VecShuffleOp, VecShuffleDynamicOp,
+            VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitFfsOp, BitParityOp,
+            BitPopcountOp, BitReverseOp, ByteSwapOp, RotateOp, ConstantOp>(op))
       ops.push_back(op);
   });
 
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
index dcef9ddee1bb4..45cf41052089c 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRSimplify.cpp
@@ -125,7 +125,7 @@ struct SimplifyTernary final : public OpRewritePattern<TernaryOp> {
 ///
 ///    %0 = cir.select if %condition then false else true
 ///    ->
-///    %0 = cir.unary not %condition
+///    %0 = cir.not %condition
 struct SimplifySelect : public OpRewritePattern<SelectOp> {
   using OpRewritePattern<SelectOp>::OpRewritePattern;
 
@@ -148,10 +148,9 @@ struct SimplifySelect : public OpRewritePattern<SelectOp> {
       return mlir::success();
     }
 
-    // cir.select if %0 then #false else #true -> cir.unary not %0
+    // cir.select if %0 then #false else #true -> cir.not %0
     if (!trueValue.getValue() && falseValue.getValue()) {
-      rewriter.replaceOpWithNewOp<cir::UnaryOp>(op, cir::UnaryOpKind::Not,
-                                                op.getCondition());
+      rewriter.replaceOpWithNewOp<cir::NotOp>(op, op.getCondition());
       return mlir::success();
     }
 
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index ad6c49ca87183..4d43e7d645c3b 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -22,6 +22,7 @@
 #include "clang/CIR/Dialect/Passes.h"
 #include "clang/CIR/Interfaces/ASTAttrInterfaces.h"
 #include "clang/CIR/MissingFeatures.h"
+#include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/Path.h"
 
 #include <memory>
@@ -73,7 +74,7 @@ struct LoweringPreparePass
   void lowerCastOp(cir::CastOp op);
   void lowerComplexDivOp(cir::ComplexDivOp op);
   void lowerComplexMulOp(cir::ComplexMulOp op);
-  void lowerUnaryOp(cir::UnaryOp op);
+  void lowerUnaryOp(cir::UnaryOpInterface op);
   void lowerGlobalOp(cir::GlobalOp op);
   void lowerArrayDtor(cir::ArrayDtor op);
   void lowerArrayCtor(cir::ArrayCtor op);
@@ -797,14 +798,11 @@ void LoweringPreparePass::lowerComplexMulOp(cir::ComplexMulOp op) {
   op.erase();
 }
 
-void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
-  mlir::Type ty = op.getType();
-  if (!mlir::isa<cir::ComplexType>(ty))
+void LoweringPreparePass::lowerUnaryOp(cir::UnaryOpInterface op) {
+  if (!mlir::isa<cir::ComplexType>(op.getResult().getType()))
     return;
 
-  mlir::Location loc = op.getLoc();
-  cir::UnaryOpKind opKind = op.getKind();
-
+  mlir::Location loc = op->getLoc();
   CIRBaseBuilderTy builder(getContext());
   builder.setInsertionPointAfter(op);
 
@@ -812,31 +810,25 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
   mlir::Value operandReal = builder.createComplexReal(loc, operand);
   mlir::Value operandImag = builder.createComplexImag(loc, operand);
 
-  mlir::Value resultReal;
-  mlir::Value resultImag;
-
-  switch (opKind) {
-  case cir::UnaryOpKind::Inc:
-  case cir::UnaryOpKind::Dec:
-    resultReal = builder.createUnaryOp(loc, opKind, operandReal);
-    resultImag = operandImag;
-    break;
-
-  case cir::UnaryOpKind::Minus:
-    resultReal = builder.createUnaryOp(loc, opKind, operandReal);
-    resultImag = builder.createUnaryOp(loc, opKind, operandImag);
-    break;
-
-  case cir::UnaryOpKind::Not:
-    resultReal = operandReal;
-    resultImag =
-        builder.createUnaryOp(loc, cir::UnaryOpKind::Minus, operandImag);
-    break;
-  }
+  mlir::Value resultReal = operandReal;
+  mlir::Value resultImag = operandImag;
+
+  llvm::TypeSwitch<mlir::Operation *>(op)
+      .Case<cir::IncOp>(
+          [&](auto) { resultReal = builder.createInc(loc, operandReal); })
+      .Case<cir::DecOp>(
+          [&](auto) { resultReal = builder.createDec(loc, operandReal); })
+      .Case<cir::MinusOp>([&](auto) {
+        resultReal = builder.createMinus(loc, operandReal);
+        resultImag = builder.createMinus(loc, operandImag);
+      })
+      .Case<cir::NotOp>(
+          [&](auto) { resultImag = builder.createMinus(loc, operandImag); })
+      .Default([](auto) { llvm_unreachable("unhandled unary complex op"); });
 
   mlir::Value result = builder.createComplexCreate(loc, resultReal, resultImag);
-  op.replaceAllUsesWith(result);
-  op.erase();
+  op->replaceAllUsesWith(mlir::ValueRange{result});
+  op->erase();
 }
 
 cir::FuncOp LoweringPreparePass::getOrCreateDtorFunc(CIRBaseBuilderTy &builder,
@@ -1547,8 +1539,8 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
           !globalOp.getCtorRegion().empty())
         handleStaticLocal(globalOp, getGlobal);
     }
-  } else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op)) {
-    lowerUnaryOp(unary);
+  } else if (auto unaryOp = mlir::dyn_cast<cir::UnaryOpInterface>(op)) {
+    lowerUnaryOp(unaryOp);
   } else if (auto callOp = dyn_cast<cir::CallOp>(op)) {
     lowerTrivialCopyCall(callOp);
   } else if (auto storeOp = dyn_cast<cir::StoreOp>(op)) {
@@ -1572,7 +1564,8 @@ void LoweringPreparePass::runOnOperation() {
     if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
                   cir::ComplexMulOp, cir::ComplexDivOp, cir::DynamicCastOp,
                   cir::FuncOp, cir::CallOp, cir::GetGlobalOp, cir::GlobalOp,
-                  cir::StoreOp, cir::UnaryOp>(op))
+                  cir::StoreOp, cir::IncOp, cir::DecOp, cir::MinusOp,
+                  cir::NotOp>(op))
       opsToTransform.push_back(op);
   });
 
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 58e7c8a65a1cd..e5f459197150a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2715,120 +2715,106 @@ mlir::LogicalResult CIRToLLVMSwitchFlatOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
-mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
-    cir::UnaryOp op, OpAdaptor adaptor,
-    mlir::ConversionPatternRewriter &rewriter) const {
-  assert(op.getType() == op.getInput().getType() &&
-         "Unary operation's operand type and result type are different");
-  mlir::Type type = op.getType();
-  mlir::Type elementType = elementTypeIfVector(type);
-  bool isVector = mlir::isa<cir::VectorType>(type);
-  mlir::Type llvmType = getTypeConverter()->convertType(type);
+static mlir::LLVM::IntegerOverflowFlags nswFlag(bool nsw) {
+  return nsw ? mlir::LLVM::IntegerOverflowFlags::nsw
+             : mlir::LLVM::IntegerOverflowFlags::none;
+}
+
+template <typename CIROp, typename LLVMIntOp>
+static mlir::LogicalResult
+lowerIncDecOp(CIROp op, typename CIROp::Adaptor adaptor,
+              mlir::ConversionPatternRewriter &rewriter, double fpConstant) {
+  mlir::Type elementType = elementTypeIfVector(op.getType());
+  mlir::Type llvmType = adaptor.getInput().getType();
   mlir::Location loc = op.getLoc();
 
-  // Integer unary operations: - ~ ++ --
   if (mlir::isa<cir::IntType>(elementType)) {
-    mlir::LLVM::IntegerOverflowFlags maybeNSW =
-        op.getNoSignedWrap() ? mlir::LLVM::IntegerOverflowFlags::nsw
-                             : mlir::LLVM::IntegerOverflowFlags::none;
-    switch (op.getKind()) {
-    case cir::UnaryOpKind::Inc: {
-      assert(!isVector && "++ not allowed on vector types");
-      auto one = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 1);
-      rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(
-          op, llvmType, adaptor.getInput(), one, maybeNSW);
-      return mlir::success();
-    }
-    case cir::UnaryOpKind::Dec: {
-      assert(!isVector && "-- not allowed on vector types");
-      auto one = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 1);
-      rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(op, adaptor.getInput(),
-                                                     one, maybeNSW);
-      return mlir::success();
-    }
-    case cir::UnaryOpKind::Minus: {
-      mlir::Value zero;
-      if (isVector)
-        zero = mlir::LLVM::ZeroOp::create(rewriter, loc, llvmType);
-      else
-        zero = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 0);
-      rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
-          op, zero, adaptor.getInput(), maybeNSW);
-      return mlir::success();
-    }
-    case cir::UnaryOpKind::Not: {
-      // bit-wise compliment operator, implemented as an XOR with -1.
-      mlir::Value minusOne;
-      if (isVector) {
-        const uint64_t numElements =
-            mlir::dyn_cast<cir::VectorType>(type).getSize();
-        std::vector<int32_t> values(numElements, -1);
-        mlir::DenseIntElementsAttr denseVec = rewriter.getI32VectorAttr(values);
-        minusOne =
-            mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, denseVec);
-      } else {
-        minusOne = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, -1);
-      }
-      rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, adaptor.getInput(),
-                                                     minusOne);
-      return mlir::success();
-    }
-    }
-    llvm_unreachable("Unexpected unary op for int");
+    auto maybeNSW = nswFlag(op.getNoSignedWrap());
+    auto one = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 1);
+    rewriter.replaceOpWithNewOp<LLVMIntOp>(op, adaptor.getInput(), one,
+                                           maybeNSW);
+    return mlir::success();
+  }
+  if (mlir::isa<cir::FPTypeInterface>(elementType)) {
+    auto fpConst = mlir::LLVM::ConstantOp::create(
+        rewriter, loc, rewriter.getFloatAttr(llvmType, fpConstant));
+    rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, fpConst,
+                                                    adaptor.getInput());
+    return mlir::success();
   }
+  return op.emitError() << "Unsupported type for IncOp/DecOp";
+}
 
-  // Floating point unary operations: - ++ --
+mlir::LogicalResult CIRToLLVMIncOpLowering::matchAndRewrite(
+    cir::IncOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  return lowerIncDecOp<cir::IncOp, mlir::LLVM::AddOp>(op, adaptor, rewriter,
+                                                      1.0);
+}
+
+mlir::LogicalResult CIRToLLVMDecOpLowering::matchAndRewrite(
+    cir::DecOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  return lowerIncDecOp<cir::DecOp, mlir::LLVM::SubOp>(op, adaptor, rewriter,
+                                                      -1.0);
+}
+
+mlir::LogicalResult CIRToLLVMMinusOpLowering::matchAndRewrite(
+    cir::MinusOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Type elementType = elementTypeIfVector(op.getType());
+  bool isVector = mlir::isa<cir::VectorType>(op.getType());
+  mlir::Type llvmType = adaptor.getInput().getType();
+  mlir::Location loc = op.getLoc();
+
+  if (mlir::isa<cir::IntType>(elementType)) {
+    auto maybeNSW = nswFlag(op.getNoSignedWrap());
+    mlir::Value zero;
+    if (isVector)
+      zero = mlir::LLVM::ZeroOp::create(rewriter, loc, llvmType);
+    else
+      zero = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 0);
+    rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(op, zero, adaptor.getInput(),
+                                                   maybeNSW);
+    return mlir::success();
+  }
   if (mlir::isa<cir::FPTypeInterface>(elementType)) {
-    switch (op.getKind()) {
-    case cir::UnaryOpKind::Inc: {
-      assert(!isVector && "++ not allowed on vector types");
-      mlir::LLVM::ConstantOp one = mlir::LLVM::ConstantOp::create(
-          rewriter, loc, llvmType, rewriter.getFloatAttr(llvmType, 1.0));
-      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, llvmType, one,
-                                                      adaptor.getInput());
-      return mlir::success();
-    }
-    case cir::UnaryOpKind::Dec: {
-      assert(!isVector && "-- not allowed on vector types");
-      mlir::LLVM::ConstantOp minusOne = mlir::LLVM::ConstantOp::create(
-          rewriter, loc, llvmType, rewriter.getFloatAttr(llvmType, -1.0));
-      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(op, llvmType, minusOne,
-                                                      adaptor.getInput());
-      return mlir::success();
-    }
-    case cir::UnaryOpKind::Minus:
-      rewriter.replaceOpWithNewOp<mlir::LLVM::FNegOp>(op, llvmType,
-                                                      adaptor.getInput());
-      return mlir::success();
-    case cir::UnaryOpKind::Not:
-      return op.emitError() << "Unary not is invalid for floating-point types";
-    }
-    llvm_unreachable("Unexpected unary op for float");
+    rewriter.replaceOpWithNewOp<mlir::LLVM::FNegOp>(op, adaptor.getInput());
+    return mlir::success();
   }
+  return op.emitError() << "Unsupported type for unary minus";
+}
 
-  // Boolean unary operations: ! only. (For all others, the operand has
-  // already been promoted to int.)
-  if (mlir::isa<cir::BoolType>(elementType)) {
-    switch (op.getKind()) {
-    case cir::UnaryOpKind::Inc:
-    case cir::UnaryOpKind::Dec:
-    case cir::UnaryOpKind::Minus:
-      // Some of these are allowed in source code, but we shouldn't get here
-      // with a boolean type.
-      return op.emitError() << "Unsupported unary operation on boolean type";
-    case cir::UnaryOpKind::Not: {
-      assert(!isVector && "NYI: op! on vector mask");
-      auto one = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 1);
-      rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, adaptor.getInput(),
-                                                     one);
-      return mlir::success();
-    }
+mlir::LogicalResult CIRToLLVMNotOpLowering::matchAndRewrite(
+    cir::NotOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Type elementType = elementTypeIfVector(op.getType());
+  bool isVector = mlir::isa<cir::VectorType>(op.getType());
+  mlir::Type llvmType = adaptor.getInput().getType();
+  mlir::Location loc = op.getLoc();
+
+  if (mlir::isa<cir::IntType>(elementType)) {
+    mlir::Value minusOne;
+    if (isVector) {
+      const uint64_t numElements =
+          mlir::dyn_cast<cir::VectorType>(op.getType()).getSize();
+      SmallVector<int32_t> values(numElements, -1);
+      mlir::DenseIntElementsAttr denseVec = rewriter.getI32VectorAttr(values);
+      minusOne =
+          mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, denseVec);
+    } else {
+      minusOne = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, -1);
     }
-    llvm_unreachable("Unexpected unary op for bool");
+    rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, adaptor.getInput(),
+                                                   minusOne);
+    return mlir::success();
   }
-
-  return op.emitError() << "Unary operation has unsupported type: "
-                        << elementType;
+  if (mlir::isa<cir::BoolType>(elementType)) {
+    auto one = mlir::LLVM::ConstantOp::create(rewriter, loc, llvmType, 1);
+    rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(op, adaptor.getInput(), one);
+    return mlir::success();
+  }
+  return op.emitError() << "Unsupported type for bitwise NOT";
 }
 
 static bool isIntTypeUnsigned(mlir::Type type) {
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 9bce73f253a11..8d380be5c6d2c 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -266,7 +266,7 @@ void c11_atomic_cmpxchg_strong(_Atomic(int) *ptr, int *expected, int desired, in
   __c11_atomic_compare_exchange_strong(ptr, expected, desired,
                                        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -348,7 +348,7 @@ void c11_atomic_cmpxchg_weak(_Atomic(int) *ptr, int *expected, int desired, int
   __c11_atomic_compare_exchange_weak(ptr, expected, desired,
                                      __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -429,7 +429,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired, int failure) {
 
   __atomic_compare_exchange(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -460,7 +460,7 @@ void atomic_cmpxchg(int *ptr, int *expected, int *desired, int failure) {
 
   __atomic_compare_exchange(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -581,7 +581,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired, int failure) {
 
   __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -612,7 +612,7 @@ void atomic_cmpxchg_n(int *ptr, int *expected, int desired, int failure) {
 
   __atomic_compare_exchange_n(ptr, expected, desired, /*weak=*/1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
   // CIR:         %[[OLD:.+]], %[[SUCCESS:.+]] = cir.atomic.cmpxchg weak success(seq_cst) failure(acquire) syncscope(system) %{{.+}}, %{{.+}}, %{{.+}} align(4) : (!cir.ptr<!s32i>, !s32i, !s32i) -> (!s32i, !cir.bool)
-  // CIR-NEXT:    %[[FAILED:.+]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+  // CIR-NEXT:    %[[FAILED:.+]] = cir.not %[[SUCCESS]] : !cir.bool
   // CIR-NEXT:    cir.if %[[FAILED]] {
   // CIR-NEXT:      cir.store align(4) %[[OLD]], %{{.+}} : !s32i, !cir.ptr<!s32i>
   // CIR-NEXT:    }
@@ -2027,7 +2027,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
   // CIR: [[RES0:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL0]] : (!cir.ptr<!s8i>, !s8i) -> !s8i
   // CIR: [[INTERM0:%.*]] = cir.and [[RES0]], [[VAL0]] : !s8i
-  // CIR: [[RET0:%.*]] =  cir.unary(not, [[INTERM0]]) : !s8i, !s8i
+  // CIR: [[RET0:%.*]] =  cir.not [[INTERM0]] : !s8i
   // LLVM:  [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[RES0:%.*]] = atomicrmw nand ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
   // LLVM:  [[INTERM0:%.*]] = and i8 [[RES0]], [[VAL0]]
@@ -2042,7 +2042,7 @@ void test_op_and_fetch() {
 
   // CIR: [[RES1:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL1:%.*]] : (!cir.ptr<!u8i>, !u8i) -> !u8i
   // CIR: [[INTERM1:%.*]] = cir.and [[RES1]], [[VAL1]] : !u8i
-  // CIR: [[RET1:%.*]] = cir.unary(not, [[INTERM1]]) : !u8i, !u8i
+  // CIR: [[RET1:%.*]] = cir.not [[INTERM1]] : !u8i
   // LLVM:  [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[RES1:%.*]] = atomicrmw nand ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
   // LLVM:  [[INTERM1:%.*]] = and i8 [[RES1]], [[VAL1]]
@@ -2058,7 +2058,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
   // CIR: [[RES2:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL2]] : (!cir.ptr<!s16i>, !s16i) -> !s16i
   // CIR: [[INTERM2:%.*]] = cir.and [[RES2]], [[VAL2]] : !s16i
-  // CIR: [[RET2:%.*]] =  cir.unary(not, [[INTERM2]]) : !s16i, !s16i
+  // CIR: [[RET2:%.*]] =  cir.not [[INTERM2]] : !s16i
   // LLVM:  [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
   // LLVM:  [[RES2:%.*]] = atomicrmw nand ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
@@ -2076,7 +2076,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
   // CIR: [[RES3:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL3]] : (!cir.ptr<!u16i>, !u16i) -> !u16i
   // CIR: [[INTERM3:%.*]] = cir.and [[RES3]], [[VAL3]] : !u16i
-  // CIR: [[RET3:%.*]] =  cir.unary(not, [[INTERM3]]) : !u16i, !u16i
+  // CIR: [[RET3:%.*]] =  cir.not [[INTERM3]] : !u16i
   // LLVM:  [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
   // LLVM:  [[RES3:%.*]] = atomicrmw nand ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
@@ -2094,7 +2094,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
   // CIR: [[RES4:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL4]] : (!cir.ptr<!s32i>, !s32i) -> !s32i
   // CIR: [[INTERM4:%.*]] = cir.and [[RES4]], [[VAL4]] : !s32i
-  // CIR: [[RET4:%.*]] =  cir.unary(not, [[INTERM4]]) : !s32i, !s32i
+  // CIR: [[RET4:%.*]] =  cir.not [[INTERM4]] : !s32i
   // LLVM:  [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
   // LLVM:  [[RES4:%.*]] = atomicrmw nand ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
@@ -2112,7 +2112,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
   // CIR: [[RES5:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL5]] : (!cir.ptr<!u32i>, !u32i) -> !u32i
   // CIR: [[INTERM5:%.*]] = cir.and [[RES5]], [[VAL5]] : !u32i
-  // CIR: [[RET5:%.*]] =  cir.unary(not, [[INTERM5]]) : !u32i, !u32i
+  // CIR: [[RET5:%.*]] =  cir.not [[INTERM5]] : !u32i
   // LLVM:  [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
   // LLVM:  [[RES5:%.*]] = atomicrmw nand ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
@@ -2130,7 +2130,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
   // CIR: [[RES6:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL6]] : (!cir.ptr<!s64i>, !s64i) -> !s64i
   // CIR: [[INTERM6:%.*]] = cir.and [[RES6]], [[VAL6]] : !s64i
-  // CIR: [[RET6:%.*]] =  cir.unary(not, [[INTERM6]]) : !s64i, !s64i
+  // CIR: [[RET6:%.*]] =  cir.not [[INTERM6]] : !s64i
   // LLVM:  [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
   // LLVM:  [[RES6:%.*]] = atomicrmw nand ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
@@ -2148,7 +2148,7 @@ void test_op_and_fetch() {
   // CIR: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
   // CIR: [[RES7:%.*]] = cir.atomic.fetch nand seq_cst syncscope(system) fetch_first {{%.*}}, [[VAL7]] : (!cir.ptr<!u64i>, !u64i) -> !u64i
   // CIR: [[INTERM7:%.*]] = cir.and [[RES7]], [[VAL7]] : !u64i
-  // CIR: [[RET7:%.*]] =  cir.unary(not, [[INTERM7]]) : !u64i, !u64i
+  // CIR: [[RET7:%.*]] =  cir.not [[INTERM7]] : !u64i
   // LLVM:  [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
   // LLVM:  [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
   // LLVM:  [[RES7:%.*]] = atomicrmw nand ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c
index 10af7ad1ef5ee..7d86bc36a31e6 100644
--- a/clang/test/CIR/CodeGen/bitfields.c
+++ b/clang/test/CIR/CodeGen/bitfields.c
@@ -288,7 +288,7 @@ void unOp(S* s) {
 // CIR:   [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR:   [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "d"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
 // CIR:   [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
-// CIR:   [[TMP4:%.*]] = cir.unary(inc, [[TMP3]]) nsw : !s32i, !s32i
+// CIR:   [[TMP4:%.*]] = cir.inc nsw [[TMP3]] : !s32i
 // CIR:   cir.set_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
 
 // LLVM: define {{.*@unOp}}
diff --git a/clang/test/CIR/CodeGen/complex-builtins.cpp b/clang/test/CIR/CodeGen/complex-builtins.cpp
index 811af47a704f5..9a6ca7562ecf7 100644
--- a/clang/test/CIR/CodeGen/complex-builtins.cpp
+++ b/clang/test/CIR/CodeGen/complex-builtins.cpp
@@ -94,7 +94,7 @@ void foo4() {
 // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[COMPLEX]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR: %[[IMAG_MINUS:.*]] = cir.minus %[[IMAG]] : !cir.float
 // CIR: %[[RESULT_VAL:.*]] = cir.complex.create %[[REAL]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
 // CIR: cir.store{{.*}} %[[RESULT_VAL]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index 31cc0d8b8de56..753b322c6597a 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -13,7 +13,7 @@ void foo() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR-BEFORE: %[[COMPLEX_NOT:.*]] = cir.unary(not, %[[TMP]]) : !cir.complex<!s32i>, !cir.complex<!s32i>
+// CIR-BEFORE: %[[COMPLEX_NOT:.*]] = cir.not %[[TMP]] : !cir.complex<!s32i>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_NOT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
@@ -21,7 +21,7 @@ void foo() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!s32i> -> !s32i
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!s32i> -> !s32i
-// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !s32i, !s32i
+// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.minus %[[IMAG]] : !s32i
 // CIR-AFTER: %[[RESULT_VAL:.*]] = cir.complex.create %[[REAL]], %[[IMAG_MINUS]] : !s32i -> !cir.complex<!s32i>
 // CIR-AFTER: cir.store{{.*}} %[[RESULT_VAL]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
 
@@ -55,7 +55,7 @@ void foo2() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_NOT:.*]] = cir.unary(not, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_NOT:.*]] = cir.not %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_NOT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
@@ -63,7 +63,7 @@ void foo2() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.minus %[[IMAG]] : !cir.float
 // CIR-AFTER: %[[RESULT_VAL:.*]] = cir.complex.create %[[REAL]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[RESULT_VAL]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -97,7 +97,7 @@ void foo3() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_INC:.*]] = cir.unary(inc, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_INC:.*]] = cir.inc %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_INC]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-BEFORE: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -106,7 +106,7 @@ void foo3() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_INC:.*]] = cir.unary(inc, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[REAL_INC:.*]] = cir.inc %[[REAL]] : !cir.float
 // CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_INC]], %[[IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-AFTER: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -146,7 +146,7 @@ void foo4() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_INC:.*]] = cir.unary(inc, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_INC:.*]] = cir.inc %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_INC]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_INC]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -155,7 +155,7 @@ void foo4() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_INC:.*]] = cir.unary(inc, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[REAL_INC:.*]] = cir.inc %[[REAL]] : !cir.float
 // CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_INC]], %[[IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -195,7 +195,7 @@ void foo5() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_DEC:.*]] = cir.unary(dec, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_DEC:.*]] = cir.dec %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_DEC]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-BEFORE: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -204,7 +204,7 @@ void foo5() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_DEC:.*]] = cir.unary(dec, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[REAL_DEC:.*]] = cir.dec %[[REAL]] : !cir.float
 // CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_DEC]], %[[IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-AFTER: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -244,7 +244,7 @@ void foo6() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_DEC:.*]] = cir.unary(dec, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_DEC:.*]] = cir.dec %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_DEC]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_DEC]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -253,7 +253,7 @@ void foo6() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_DEC:.*]] = cir.unary(dec, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[REAL_DEC:.*]] = cir.dec %[[REAL]] : !cir.float
 // CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_DEC]], %[[IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
@@ -293,13 +293,11 @@ void foo7() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE-NOT: cir.unary
 // CIR-BEFORE: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-AFTER-NOT: cir.unary
 // CIR-AFTER: cir.store{{.*}} %[[TMP]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
@@ -326,7 +324,7 @@ void foo8() {
 // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[COMPLEX_MINUS:.*]] = cir.unary(minus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_MINUS:.*]] = cir.minus %[[TMP]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_MINUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
@@ -334,8 +332,8 @@ void foo8() {
 // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[REAL_MINUS:.*]] = cir.unary(minus, %[[REAL]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[REAL_MINUS:.*]] = cir.minus %[[REAL]] : !cir.float
+// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.minus %[[IMAG]] : !cir.float
 // CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_MINUS]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -373,7 +371,6 @@ void foo9() {
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
 // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
 // CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.complex<!cir.float>
-// CIR-BEFORE-NOT: cir.unary
 // CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
 // CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
 
@@ -387,7 +384,6 @@ void foo9() {
 // CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER-NOT: cir.unary
 // CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16
 // CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16
 // CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
@@ -430,7 +426,7 @@ void foo10() {
 // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
 // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
 // CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.complex<!cir.float>
-// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.minus %[[A_COMPLEX_F32]] : !cir.complex<!cir.float>
 // CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex<!cir.float> -> !cir.complex<!cir.f16>
 // CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
 
@@ -444,8 +440,8 @@ void foo10() {
 // CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32]]) : !cir.float, !cir.float
-// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.minus %[[A_REAL_F32]] : !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.minus %[[A_IMAG_F32]] : !cir.float
 // CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
@@ -498,14 +494,14 @@ void complex_unary_inc_lvalue() {
 
 // CIR-BEFORE: %[[A_ADDR]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.inc %[[TMP_A]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.unary(inc, %2) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.inc %[[A_REAL]] : !cir.float
 // CIR-AFTER: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
@@ -536,14 +532,14 @@ void complex_unary_dec_lvalue() {
 
 // CIR-BEFORE: %[[A_ADDR]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(dec, %[[TMP_A]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.dec %[[TMP_A]] : !cir.complex<!cir.float>
 // CIR-BEFORE: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
 // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
 // CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
 // CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
-// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.unary(dec, %2) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.dec %[[A_REAL]] : !cir.float
 // CIR-AFTER: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
 // CIR-AFTER: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
 
diff --git a/clang/test/CIR/CodeGen/cxx-rewritten-binary-operator.cpp b/clang/test/CIR/CodeGen/cxx-rewritten-binary-operator.cpp
index c743cdc4845a3..f8eece43a0509 100644
--- a/clang/test/CIR/CodeGen/cxx-rewritten-binary-operator.cpp
+++ b/clang/test/CIR/CodeGen/cxx-rewritten-binary-operator.cpp
@@ -19,7 +19,7 @@ void cxx_rewritten_binary_operator_scalar_expr() {
 // CIR: %[[B_ADDR:.*]] = cir.alloca !rec_HasOpEq, !cir.ptr<!rec_HasOpEq>, ["b"]
 // CIR: %[[NEQ_ADDR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["neq", init]
 // CIR: %[[EQ:.*]] = cir.call @_ZNK7HasOpEqeqERKS_(%[[A_ADDR]], %[[B_ADDR]]) : (!cir.ptr<!rec_HasOpEq> {{.*}}, !cir.ptr<!rec_HasOpEq> {{.*}}) -> (!cir.bool{{.*}})
-// CIR: %[[NEQ:.*]] = cir.unary(not, %[[EQ]]) : !cir.bool, !cir.bool
+// CIR: %[[NEQ:.*]] = cir.not %[[EQ]] : !cir.bool
 // CIR: cir.store{{.*}} %[[NEQ]], %[[NEQ_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
 
 // LLVM: %[[A_ADDR:.*]] = alloca %struct.HasOpEq, i64 1, align 1
diff --git a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp
index d59dffbfd1257..e8f0da0ed3cdc 100644
--- a/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp
+++ b/clang/test/CIR/CodeGen/dynamic-cast-exact.cpp
@@ -83,7 +83,7 @@ Derived &ref_cast(Base1 &ref) {
 // CIR-NEXT:   %[[SRC_VPTR_PTR:.*]] = cir.cast bitcast %[[SRC]] : !cir.ptr<!rec_Base1> -> !cir.ptr<!cir.vptr>
 // CIR-NEXT:   %[[SRC_VPTR:.*]] = cir.load{{.*}} %[[SRC_VPTR_PTR]] : !cir.ptr<!cir.vptr>, !cir.vptr
 // CIR-NEXT:   %[[SUCCESS:.*]] = cir.cmp(eq, %[[SRC_VPTR]], %[[EXPECTED_VPTR]]) : !cir.vptr, !cir.bool
-// CIR-NEXT:   %[[FAILED:.*]] = cir.unary(not, %[[SUCCESS]]) : !cir.bool, !cir.bool
+// CIR-NEXT:   %[[FAILED:.*]] = cir.not %[[SUCCESS]] : !cir.bool
 // CIR-NEXT:   cir.if %[[FAILED]] {
 // CIR-NEXT:     cir.call @__cxa_bad_cast() : () -> ()
 // CIR-NEXT:     cir.unreachable
diff --git a/clang/test/CIR/CodeGen/fold-during-cg.c b/clang/test/CIR/CodeGen/fold-during-cg.c
index 166efe893d682..137d5bb3d4f20 100644
--- a/clang/test/CIR/CodeGen/fold-during-cg.c
+++ b/clang/test/CIR/CodeGen/fold-during-cg.c
@@ -18,19 +18,19 @@ void fold_int_not() {
 
   n = ~0;
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.not
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i32 -1, ptr %{{.*}}
 
   n = ~1;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.int<-2> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.not
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store i32 -2, ptr %{{.*}}
 
   n = ~0xFFFFFFFE;
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.not
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i32 1, ptr %{{.*}}
 }
@@ -43,73 +43,73 @@ void fold_int_plus() {
 
   n = +1;
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i32 1, ptr %{{.*}}
 
   n = +2;
   // CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[TWO]], %{{.*}}
   // LLVM_OGCG: store i32 2, ptr %{{.*}}
 
   n = +(-3);
   // CIR: %[[MINUS_THREE:.*]] = cir.const #cir.int<-3> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_THREE]], %{{.*}}
   // LLVM_OGCG: store i32 -3, ptr %{{.*}}
 
   n = +(0x1FFFFFFFF);
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i32 -1, ptr %{{.*}}
 
   s = +1;
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i16 1, ptr %{{.*}}
 
   s = +2;
   // CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[TWO]], %{{.*}}
   // LLVM_OGCG: store i16 2, ptr %{{.*}}
 
   s = +(-3);
   // CIR: %[[MINUS_THREE:.*]] = cir.const #cir.int<-3> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_THREE]], %{{.*}}
   // LLVM_OGCG: store i16 -3, ptr %{{.*}}
 
   s = +(0x1FFFF);
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i16 -1, ptr %{{.*}}
 
   u = +1;
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i32 1, ptr %{{.*}}
 
   u = +2;
   // CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[TWO]], %{{.*}}
   // LLVM_OGCG: store i32 2, ptr %{{.*}}
 
   u = +(-3);
   // CIR: %[[MINUS_THREE:.*]] = cir.const #cir.int<4294967293> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_THREE]], %{{.*}}
   // LLVM_OGCG: store i32 -3, ptr %{{.*}}
 
   u = +(0x1FFFFFFFF);
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<4294967295> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i32 -1, ptr %{{.*}}
 }
@@ -122,76 +122,76 @@ void fold_int_minus() {
 
   n = -1;
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i32 -1, ptr %{{.*}}
 
   n = -2;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.int<-2> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store i32 -2, ptr %{{.*}}
 
   n = -(-3);
   // CIR-DUP: %[[UNUSED_THREE:.*]] = cir.const #cir.int<3> : !s32i
   // CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[THREE]], %{{.*}}
   // LLVM_OGCG: store i32 3, ptr %{{.*}}
 
   n = -(0x1FFFFFFFF);
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i32 1, ptr %{{.*}}
 
   s = -1;
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<-1> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i16 -1, ptr %{{.*}}
 
   s = -2;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.int<-2> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store i16 -2, ptr %{{.*}}
 
   s = -(-3);
   // CIR-DUP: %[[UNUSED_THREE:.*]] = cir.const #cir.int<3> : !s32i
   // CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[THREE]], %{{.*}}
   // LLVM_OGCG: store i16 3, ptr %{{.*}}
 
   s = -(0x1FFFF);
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s16i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i16 1, ptr %{{.*}}
 
   u = -1;
   // CIR: %[[MINUS_ONE:.*]] = cir.const #cir.int<4294967295> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_ONE]], %{{.*}}
   // LLVM_OGCG: store i32 -1, ptr %{{.*}}
 
   u = -2;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.int<4294967294> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store i32 -2, ptr %{{.*}}
 
   u = -(-3);
   // CIR-DUP: %[[UNUSED_THREE:.*]] = cir.const #cir.int<3> : !s32i
   // CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[THREE]], %{{.*}}
   // LLVM_OGCG: store i32 3, ptr %{{.*}}
 
   u = -(0x1FFFFFFFF);
   // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[ONE]], %{{.*}}
   // LLVM_OGCG: store i32 1, ptr %{{.*}}
 }
@@ -203,25 +203,25 @@ void fold_float_plus() {
 
   f = +2.0f;
   // CIR: %[[TWO:.*]] = cir.const #cir.fp<2.000000e+00> : !cir.float
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[TWO]], %{{.*}}
   // LLVM_OGCG: store float 2.000000e+00, ptr %{{.*}}
 
   f = +(-3.0f);
   // CIR: %[[MINUS_THREE:.*]] = cir.const #cir.fp<-3.000000e+00> : !cir.float
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_THREE]], %{{.*}}
   // LLVM_OGCG: store float -3.000000e+00, ptr %{{.*}}
 
   d = +2.0;
   // CIR: %[[TWO:.*]] = cir.const #cir.fp<2.000000e+00> : !cir.double
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[TWO]], %{{.*}}
   // LLVM_OGCG: store double 2.000000e+00, ptr %{{.*}}
 
   d = +(-3.0);
   // CIR: %[[MINUS_THREE:.*]] = cir.const #cir.fp<-3.000000e+00> : !cir.double
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.plus
   // CIR: cir.store{{.*}} %[[MINUS_THREE]], %{{.*}}
   // LLVM_OGCG: store double -3.000000e+00, ptr %{{.*}}
 }
@@ -233,27 +233,27 @@ void fold_float_minus() {
 
   f = -2.0f;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.fp<-2.000000e+00> : !cir.float
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store float -2.000000e+00, ptr %{{.*}}
 
   f = -(-3.0f);
   // CIR-DUP: %[[UNUSED_THREE:.*]] = cir.const #cir.fp<3.000000e+00> : !cir.float
   // CIR: %[[THREE:.*]] = cir.const #cir.fp<3.000000e+00> : !cir.float
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[THREE]], %{{.*}}
   // LLVM_OGCG: store float 3.000000e+00, ptr %{{.*}}
 
   d = -2.0;
   // CIR: %[[MINUS_TWO:.*]] = cir.const #cir.fp<-2.000000e+00> : !cir.double
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[MINUS_TWO]], %{{.*}}
   // LLVM_OGCG: store double -2.000000e+00, ptr %{{.*}}
 
   d = -(-3.0);
   // CIR-DUP: %[[UNUSED_THREE:.*]] = cir.const #cir.fp<3.000000e+00> : !cir.double
   // CIR: %[[THREE:.*]] = cir.const #cir.fp<3.000000e+00> : !cir.double
-  // CIR-NOT: cir.unary
+  // CIR-NOT: cir.minus
   // CIR: cir.store{{.*}} %[[THREE]], %{{.*}}
   // LLVM_OGCG: store double 3.000000e+00, ptr %{{.*}}
 }
diff --git a/clang/test/CIR/CodeGen/integer-overflow.c b/clang/test/CIR/CodeGen/integer-overflow.c
index 119c5a917698a..ad4de2da7a769 100644
--- a/clang/test/CIR/CodeGen/integer-overflow.c
+++ b/clang/test/CIR/CodeGen/integer-overflow.c
@@ -43,28 +43,28 @@ void test1(void) {
   // WRAPV-OGCG: mul i32
   f11G = a * b;
 
-  // DEFAULT-CIR:  cir.unary(minus, {{.*}}) nsw : !s32i
+  // DEFAULT-CIR:  cir.minus nsw {{.*}} : !s32i
   // DEFAULT-LLVM: sub nsw i32 0, 
   // DEFAULT-OGCG: sub nsw i32 0, 
-  // WRAPV-CIR:  cir.unary(minus, {{.*}}) : !s32i
+  // WRAPV-CIR:  cir.minus {{.*}} : !s32i
   // WRAPV-LLVM: sub i32 0, 
   // WRAPV-OGCG: sub i32 0, 
   f11G = -a;
   
   // PR7426 - Overflow checking for increments.
   
-  // DEFAULT-CIR:  cir.unary(inc, {{.*}}) nsw : !s32i
+  // DEFAULT-CIR:  cir.inc nsw {{.*}} : !s32i
   // DEFAULT-LLVM: add nsw i32 {{.*}}, 1
   // DEFAULT-OGCG: add nsw i32 {{.*}}, 1
-  // WRAPV-CIR:  cir.unary(inc, {{.*}}) : !s32i
+  // WRAPV-CIR:  cir.inc {{.*}} : !s32i
   // WRAPV-LLVM: add i32 {{.*}}, 1
   // WRAPV-OGCG: add i32 {{.*}}, 1
   ++a;
   
-  // DEFAULT-CIR:  cir.unary(dec, {{.*}}) nsw : !s32i
+  // DEFAULT-CIR:  cir.dec nsw {{.*}} : !s32i
   // DEFAULT-LLVM: sub nsw i32 {{.*}}, 1
   // DEFAULT-OGCG: add nsw i32 {{.*}}, -1
-  // WRAPV-CIR:  cir.unary(dec, {{.*}}) : !s32i
+  // WRAPV-CIR:  cir.dec {{.*}} : !s32i
   // WRAPV-LLVM: sub i32 {{.*}}, 1
   // WRAPV-OGCG: add i32 {{.*}}, -1
   --a;
@@ -84,40 +84,40 @@ void test1(void) {
 
   // PR9350: char pre-increment never overflows.
   extern volatile signed char PR9350_char_inc;
-  // DEFAULT-CIR:  cir.unary(inc, {{.*}}) : !s8i
+  // DEFAULT-CIR:  cir.inc {{.*}} : !s8i
   // DEFAULT-LLVM: add i8 {{.*}}, 1
   // DEFAULT-OGCG: add i8 {{.*}}, 1
-  // WRAPV-CIR:  cir.unary(inc, {{.*}}) : !s8i
+  // WRAPV-CIR:  cir.inc {{.*}} : !s8i
   // WRAPV-LLVM: add i8 {{.*}}, 1
   // WRAPV-OGCG: add i8 {{.*}}, 1
   ++PR9350_char_inc;
 
   // PR9350: char pre-decrement never overflows.
   extern volatile signed char PR9350_char_dec;
-  // DEFAULT-CIR:  cir.unary(dec, {{.*}}) : !s8i
+  // DEFAULT-CIR:  cir.dec {{.*}} : !s8i
   // DEFAULT-LLVM: sub i8 {{.*}}, 1
   // DEFAULT-OGCG: add i8 {{.*}}, -1
-  // WRAPV-CIR:  cir.unary(dec, {{.*}}) : !s8i
+  // WRAPV-CIR:  cir.dec {{.*}} : !s8i
   // WRAPV-LLVM: sub i8 {{.*}}, 1
   // WRAPV-OGCG: add i8 {{.*}}, -1
   --PR9350_char_dec;
 
   // PR9350: short pre-increment never overflows.
   extern volatile signed short PR9350_short_inc;
-  // DEFAULT-CIR:  cir.unary(inc, {{.*}}) : !s16i
+  // DEFAULT-CIR:  cir.inc {{.*}} : !s16i
   // DEFAULT-LLVM: add i16 {{.*}}, 1
   // DEFAULT-OGCG: add i16 {{.*}}, 1
-  // WRAPV-CIR:  cir.unary(inc, {{.*}}) : !s16i
+  // WRAPV-CIR:  cir.inc {{.*}} : !s16i
   // WRAPV-LLVM: add i16 {{.*}}, 1
   // WRAPV-OGCG: add i16 {{.*}}, 1
   ++PR9350_short_inc;
 
   // PR9350: short pre-decrement never overflows.
   extern volatile signed short PR9350_short_dec;
-  // DEFAULT-CIR:  cir.unary(dec, {{.*}}) : !s16i
+  // DEFAULT-CIR:  cir.dec {{.*}} : !s16i
   // DEFAULT-LLVM: sub i16 {{.*}}, 1
   // DEFAULT-OGCG: add i16 {{.*}}, -1
-  // WRAPV-CIR:  cir.unary(dec, {{.*}}) : !s16i
+  // WRAPV-CIR:  cir.dec {{.*}} : !s16i
   // WRAPV-LLVM: sub i16 {{.*}}, 1
   // WRAPV-OGCG: add i16 {{.*}}, -1
   --PR9350_short_dec;
diff --git a/clang/test/CIR/CodeGen/label.c b/clang/test/CIR/CodeGen/label.c
index 93ed6c583b98a..db35168f61ec1 100644
--- a/clang/test/CIR/CodeGen/label.c
+++ b/clang/test/CIR/CodeGen/label.c
@@ -68,7 +68,7 @@ void label_in_if(int cond) {
 // CIR:      ^bb1:
 // CIR:        cir.label "labelD"
 // CIR:        [[LOAD:%.*]] = cir.load align(4) [[COND:%.*]] : !cir.ptr<!s32i>, !s32i
-// CIR:        [[INC:%.*]] = cir.unary(inc, %3) nsw : !s32i, !s32i
+// CIR:        [[INC:%.*]] = cir.inc nsw [[LOAD]] : !s32i
 // CIR:        cir.store align(4) [[INC]], [[COND]] : !s32i, !cir.ptr<!s32i>
 // CIR:      }
 // CIR:    cir.return
diff --git a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
index 25c17d6b37dde..711249496bbba 100644
--- a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
+++ b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp
@@ -121,7 +121,6 @@ int g3() {
 
 // 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`.
 // CIR:   %[[OPERATOR_RESULT:.*]] = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[LAM_ALLOCA]]){{.*}}
-
 // 2. Load ptr to `__invoke()`.
 // CIR:   cir.store{{.*}} %[[OPERATOR_RESULT]], %[[FN_ADDR]]
 // CIR:   %[[FN:.*]] = cir.load{{.*}} %[[FN_ADDR]]
diff --git a/clang/test/CIR/CodeGen/nrvo.cpp b/clang/test/CIR/CodeGen/nrvo.cpp
index f68573e222606..a3af2d1ff032b 100644
--- a/clang/test/CIR/CodeGen/nrvo.cpp
+++ b/clang/test/CIR/CodeGen/nrvo.cpp
@@ -77,7 +77,7 @@ NonTrivial test_nrvo() {
 // CIR:     cir.return %[[RET]]
 // CIR:   } cleanup  normal {
 // CIR:     %[[NRVO_FLAG_VAL:.*]] = cir.load{{.*}} %[[NRVO_FLAG]]
-// CIR:     %[[NOT_NRVO_VAL:.*]] = cir.unary(not, %[[NRVO_FLAG_VAL]])
+// CIR:     %[[NOT_NRVO_VAL:.*]] = cir.not %[[NRVO_FLAG_VAL]]
 // CIR:     cir.if %[[NOT_NRVO_VAL]] {
 // CIR:       cir.call @_ZN10NonTrivialD1Ev(%[[RESULT]])
 // CIR:     }
diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp
index b07c1b57c127d..d43c5691ebd49 100644
--- a/clang/test/CIR/CodeGen/pointers.cpp
+++ b/clang/test/CIR/CodeGen/pointers.cpp
@@ -28,7 +28,7 @@ void foo(int *iptr, char *cptr, unsigned ustride) {
   iptr - ustride;
   // CHECK: %[[#STRIDE:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
   // CHECK: %[[#SIGNSTRIDE:]] = cir.cast integral %[[#STRIDE]] : !u32i -> !s32i
-  // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i
+  // CHECK: %[[#NEGSTRIDE:]] = cir.minus %[[#SIGNSTRIDE]] : !s32i
   // CHECK: cir.ptr_stride %{{.+}}, %[[#NEGSTRIDE]] : (!cir.ptr<!s32i>, !s32i) -> !cir.ptr<!s32i>
 
   4 + iptr;
diff --git a/clang/test/CIR/CodeGen/static-vars.c b/clang/test/CIR/CodeGen/static-vars.c
index ee4db82c40ade..96ef705d15326 100644
--- a/clang/test/CIR/CodeGen/static-vars.c
+++ b/clang/test/CIR/CodeGen/static-vars.c
@@ -24,7 +24,7 @@ void func1(void) {
   j++;
   // CHECK-DAG: %[[#V2:]] = cir.get_global @func1.j : !cir.ptr<!s32i>
   // CHECK-DAG: %[[#V3:]] = cir.load{{.*}} %[[#V2]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) nsw : !s32i, !s32i
+  // CHECK-DAG: %[[#V4:]] = cir.inc nsw %[[#V3]] : !s32i
   // CHECK-DAG: cir.store{{.*}} %[[#V4]], %[[#V2]] : !s32i, !cir.ptr<!s32i>
 }
 
diff --git a/clang/test/CIR/CodeGen/static-vars.cpp b/clang/test/CIR/CodeGen/static-vars.cpp
index 4f22fc7abc541..4cc7b68baa01f 100644
--- a/clang/test/CIR/CodeGen/static-vars.cpp
+++ b/clang/test/CIR/CodeGen/static-vars.cpp
@@ -57,7 +57,7 @@ void func1(void) {
   j++;
   // CHECK-DAG: %[[#V2:]] = cir.get_global @_ZZ5func1vE1j : !cir.ptr<!s32i>
   // CHECK-DAG: %[[#V3:]] = cir.load{{.*}} %[[#V2]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-DAG: %[[#V4:]] = cir.unary(inc, %[[#V3]]) nsw : !s32i, !s32i
+  // CHECK-DAG: %[[#V4:]] = cir.inc nsw %[[#V3]] : !s32i
   // CHECK-DAG: cir.store{{.*}} %[[#V4]], %[[#V2]] : !s32i, !cir.ptr<!s32i>
 }
 
diff --git a/clang/test/CIR/CodeGen/try-catch.cpp b/clang/test/CIR/CodeGen/try-catch.cpp
index a9ad38a56715d..029562e5b9ca4 100644
--- a/clang/test/CIR/CodeGen/try-catch.cpp
+++ b/clang/test/CIR/CodeGen/try-catch.cpp
@@ -48,7 +48,7 @@ void try_catch_with_empty_catch_all() {
 // CIR:     cir.return
 // CIR:   ^bb1:  // no predecessors
 // CIR:     %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
-// CIR:     %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR:     %[[RESULT:.*]] = cir.inc nsw %[[TMP_A]] : !s32i
 // CIR:     cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
 // CIR:     cir.yield
 // CIR:   }
@@ -90,7 +90,7 @@ void try_catch_with_empty_catch_all_2() {
 // CIR: cir.scope {
 // CIR:   cir.try {
 // CIR:     %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!s32i>, !s32i
-// CIR:     %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) nsw : !s32i, !s32i
+// CIR:     %[[RESULT:.*]] = cir.inc nsw %[[TMP_A]] : !s32i
 // CIR:     cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
 // CIR:     cir.return
 // CIR:   }
diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp
index 7d4c8f27e4545..4446fe5ceb1a4 100644
--- a/clang/test/CIR/CodeGen/unary.cpp
+++ b/clang/test/CIR/CodeGen/unary.cpp
@@ -13,7 +13,6 @@ unsigned up0() {
 // CHECK: cir.func{{.*}} @_Z3up0v() -> (!u32i{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK-NOT: cir.unary
 
 // LLVM: define{{.*}} i32 @_Z3up0v()
 // LLVM:   %[[RV:.*]] = alloca i32, i64 1, align 4
@@ -34,7 +33,7 @@ unsigned um0() {
 // CHECK: cir.func{{.*}} @_Z3um0v() -> (!u32i{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
+// CHECK:   %[[OUTPUT:.*]] = cir.minus %[[INPUT]]
 
 // LLVM: define{{.*}} i32 @_Z3um0v()
 // LLVM:   %[[RV:.*]] = alloca i32, i64 1, align 4
@@ -57,7 +56,7 @@ unsigned un0() {
 // CHECK: cir.func{{.*}} @_Z3un0v() -> (!u32i{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !u32i, !cir.ptr<!u32i>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[OUTPUT:.*]] = cir.unary(not, %[[INPUT]])
+// CHECK:   %[[OUTPUT:.*]] = cir.not %[[INPUT]]
 
 // LLVM: define{{.*}} i32 @_Z3un0v()
 // LLVM:   %[[RV:.*]] = alloca i32, i64 1, align 4
@@ -83,7 +82,7 @@ int inc0() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !s32i
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]]) nsw
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc nsw %[[INPUT]]
 // CHECK:   cir.store{{.*}} %[[INCREMENTED]], %[[A]]
 // CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load{{.*}} %[[A]]
 
@@ -111,7 +110,7 @@ int dec0() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !s32i
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]]) nsw
+// CHECK:   %[[DECREMENTED:.*]] = cir.dec nsw %[[INPUT]]
 // CHECK:   cir.store{{.*}} %[[DECREMENTED]], %[[A]]
 // CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load{{.*}} %[[A]]
 
@@ -139,7 +138,7 @@ int inc1() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !s32i
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]]) nsw
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc nsw %[[INPUT]]
 // CHECK:   cir.store{{.*}} %[[INCREMENTED]], %[[A]]
 // CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load{{.*}} %[[A]]
 
@@ -167,7 +166,7 @@ int dec1() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !s32i
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]]) nsw
+// CHECK:   %[[DECREMENTED:.*]] = cir.dec nsw %[[INPUT]]
 // CHECK:   cir.store{{.*}} %[[DECREMENTED]], %[[A]]
 // CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load{{.*}} %[[A]]
 
@@ -197,7 +196,7 @@ int inc2() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !s32i
 // CHECK:   %[[ATOB:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[ATOB]]) nsw
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc nsw %[[ATOB]]
 // CHECK:   cir.store{{.*}} %[[INCREMENTED]], %[[A]]
 // CHECK:   cir.store{{.*}} %[[ATOB]], %[[B]]
 // CHECK:   %[[B_TO_OUTPUT:.*]] = cir.load{{.*}} %[[B]]
@@ -231,7 +230,6 @@ float fpPlus() {
 // CHECK: cir.func{{.*}} @_Z6fpPlusv() -> (!cir.float{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK-NOT: cir.unary
 
 // LLVM: define{{.*}} float @_Z6fpPlusv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -252,7 +250,7 @@ float fpMinus() {
 // CHECK: cir.func{{.*}} @_Z7fpMinusv() -> (!cir.float{{.*}})
 // CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", init]
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
+// CHECK:   %[[OUTPUT:.*]] = cir.minus %[[INPUT]]
 
 // LLVM: define{{.*}} float @_Z7fpMinusv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -277,7 +275,7 @@ float fpPreInc() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !cir.float
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc %[[INPUT]]
 
 // LLVM: define{{.*}} float @_Z8fpPreIncv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -302,7 +300,7 @@ float fpPreDec() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !cir.float
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+// CHECK:   %[[DECREMENTED:.*]] = cir.dec %[[INPUT]]
 
 // LLVM: define{{.*}} float @_Z8fpPreDecv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -327,7 +325,7 @@ float fpPostInc() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !cir.float
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc %[[INPUT]]
 
 // LLVM: define{{.*}} float @_Z9fpPostIncv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -352,7 +350,7 @@ float fpPostDec() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !cir.float
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+// CHECK:   %[[DECREMENTED:.*]] = cir.dec %[[INPUT]]
 
 // LLVM: define{{.*}} float @_Z9fpPostDecv()
 // LLVM:   %[[RV:.*]] = alloca float, i64 1, align 4
@@ -380,7 +378,7 @@ float fpPostInc2() {
 // CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
 // CHECK:   cir.store{{.*}} %[[ATMP]], %[[A]] : !cir.float
 // CHECK:   %[[ATOB:.*]] = cir.load{{.*}} %[[A]]
-// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[ATOB]])
+// CHECK:   %[[INCREMENTED:.*]] = cir.inc %[[ATOB]]
 // CHECK:   cir.store{{.*}} %[[INCREMENTED]], %[[A]]
 // CHECK:   cir.store{{.*}} %[[ATOB]], %[[B]]
 // CHECK:   %[[B_TO_OUTPUT:.*]] = cir.load{{.*}} %[[B]]
@@ -411,18 +409,17 @@ void chars(char c) {
 
   int c1 = +c;
   // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
-  // CHECK-NOT: cir.unary
   int c2 = -c;
   // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i
-  // CHECK: cir.unary(minus, %[[PROMO]]) nsw : !s32i, !s32i
+  // CHECK: cir.minus nsw %[[PROMO]] : !s32i
 
   // Chars can go through some integer promotion codegen paths even when not promoted.
   // These should not have nsw attributes because the intermediate promotion makes the
   // overflow defined behavior.
-  ++c; // CHECK: cir.unary(inc, %{{.+}}) : !s8i, !s8i
-  --c; // CHECK: cir.unary(dec, %{{.+}}) : !s8i, !s8i
-  c++; // CHECK: cir.unary(inc, %{{.+}}) : !s8i, !s8i
-  c--; // CHECK: cir.unary(dec, %{{.+}}) : !s8i, !s8i
+  ++c; // CHECK: cir.inc %{{.+}} : !s8i
+  --c; // CHECK: cir.dec %{{.+}} : !s8i
+  c++; // CHECK: cir.inc %{{.+}} : !s8i
+  c--; // CHECK: cir.dec %{{.+}} : !s8i
 }
 
 _Float16 fp16UPlus(_Float16 f) {
@@ -432,7 +429,6 @@ _Float16 fp16UPlus(_Float16 f) {
 // CHECK: cir.func{{.*}} @_Z9fp16UPlusDF16_({{.*}}) -> (!cir.f16{{.*}})
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]]
 // CHECK:   %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float
-// CHECK-NOT: cir.unary
 // CHECK:   %[[UNPROMOTED:.*]] = cir.cast floating %[[PROMOTED]] : !cir.float -> !cir.f16
 
 // LLVM: define{{.*}} half @_Z9fp16UPlusDF16_({{.*}})
@@ -452,7 +448,7 @@ _Float16 fp16UMinus(_Float16 f) {
 // CHECK: cir.func{{.*}} @_Z10fp16UMinusDF16_({{.*}}) -> (!cir.f16{{.*}})
 // CHECK:   %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]]
 // CHECK:   %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float
-// CHECK:   %[[RESULT:.*]] = cir.unary(minus, %[[PROMOTED]])
+// CHECK:   %[[RESULT:.*]] = cir.minus %[[PROMOTED]]
 // CHECK:   %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16
 
 // LLVM: define{{.*}} half @_Z10fp16UMinusDF16_({{.*}})
@@ -483,24 +479,24 @@ void test_logical_not() {
 // CHECK: cir.func{{.*}} @_Z16test_logical_notv()
 // CHECK:   %[[A:.*]] = cir.load{{.*}} %[[A_ADDR:.*]] : !cir.ptr<!s32i>, !s32i
 // CHECK:   %[[A_BOOL:.*]] = cir.cast int_to_bool %[[A]] : !s32i -> !cir.bool
-// CHECK:   %[[A_NOT:.*]] = cir.unary(not, %[[A_BOOL]]) : !cir.bool, !cir.bool
+// CHECK:   %[[A_NOT:.*]] = cir.not %[[A_BOOL]] : !cir.bool
 // CHECK:   %[[A_CAST:.*]] = cir.cast bool_to_int %[[A_NOT]] : !cir.bool -> !s32i
 // CHECK:   cir.store{{.*}} %[[A_CAST]], %[[A_ADDR]] : !s32i, !cir.ptr<!s32i>
 // CHECK:   %[[B:.*]] = cir.load{{.*}} %[[B_ADDR:.*]] : !cir.ptr<!cir.bool>, !cir.bool
-// CHECK:   %[[B_NOT:.*]] = cir.unary(not, %[[B]]) : !cir.bool, !cir.bool
+// CHECK:   %[[B_NOT:.*]] = cir.not %[[B]] : !cir.bool
 // CHECK:   cir.store{{.*}} %[[B_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
 // CHECK:   %[[C:.*]] = cir.load{{.*}} %[[C_ADDR:.*]] : !cir.ptr<!cir.float>, !cir.float
 // CHECK:   %[[C_BOOL:.*]] = cir.cast float_to_bool %[[C]] : !cir.float -> !cir.bool
-// CHECK:   %[[C_NOT:.*]] = cir.unary(not, %[[C_BOOL]]) : !cir.bool, !cir.bool
+// CHECK:   %[[C_NOT:.*]] = cir.not %[[C_BOOL]] : !cir.bool
 // CHECK:   %[[C_CAST:.*]] = cir.cast bool_to_float %[[C_NOT]] : !cir.bool -> !cir.float
 // CHECK:   cir.store{{.*}} %[[C_CAST]], %[[C_ADDR]] : !cir.float, !cir.ptr<!cir.float>
 // CHECK:   %[[P:.*]] = cir.load{{.*}} %[[P_ADDR:.*]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
 // CHECK:   %[[P_BOOL:.*]] = cir.cast ptr_to_bool %[[P]] : !cir.ptr<!s32i> -> !cir.bool
-// CHECK:   %[[P_NOT:.*]] = cir.unary(not, %[[P_BOOL]]) : !cir.bool, !cir.bool
+// CHECK:   %[[P_NOT:.*]] = cir.not %[[P_BOOL]] : !cir.bool
 // CHECK:   cir.store{{.*}} %[[P_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
 // CHECK:   %[[D:.*]] = cir.load{{.*}} %[[D_ADDR:.*]] : !cir.ptr<!cir.double>, !cir.double
 // CHECK:   %[[D_BOOL:.*]] = cir.cast float_to_bool %[[D]] : !cir.double -> !cir.bool
-// CHECK:   %[[D_NOT:.*]] = cir.unary(not, %[[D_BOOL]]) : !cir.bool, !cir.bool
+// CHECK:   %[[D_NOT:.*]] = cir.not %[[D_BOOL]] : !cir.bool
 // CHECK:   cir.store{{.*}} %[[D_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr<!cir.bool>
 
 // LLVM: define{{.*}} void @_Z16test_logical_notv()
@@ -567,7 +563,6 @@ void f16NestedUPlus() {
 // CHECK:  %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
 // CHECK:  %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
 // CHECK:  %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
-// CHECK-NOT: cir.unary
 // CHECK:  %[[RESULT:.*]] = cir.cast floating %[[A_F32]] : !cir.float -> !cir.f16
 // CHECK:  cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
 
@@ -597,8 +592,8 @@ void f16NestedUMinus() {
 // CHECK:  %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
 // CHECK:  %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
 // CHECK:  %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float
-// CHECK:  %[[A_MINUS:.*]] = cir.unary(minus, %[[A_F32]]) : !cir.float, !cir.float
-// CHECK:  %[[RESULT_F32:.*]] = cir.unary(minus, %[[A_MINUS]]) : !cir.float, !cir.float
+// CHECK:  %[[A_MINUS:.*]] = cir.minus %[[A_F32]] : !cir.float
+// CHECK:  %[[RESULT_F32:.*]] = cir.minus %[[A_MINUS]] : !cir.float
 // CHECK:  %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16
 // CHECK:  cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
 
diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp
index a32bc94bae103..74de78ca8a987 100644
--- a/clang/test/CIR/CodeGen/vector-ext.cpp
+++ b/clang/test/CIR/CodeGen/vector-ext.cpp
@@ -334,10 +334,10 @@ void foo8() {
 // CIR: %[[TMP1:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[TMP1]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP2:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: %[[MINUS:.*]] = cir.minus %[[TMP2]] : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP3:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[NOT:.*]] = cir.unary(not, %[[TMP3]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: %[[NOT:.*]] = cir.not %[[TMP3]] : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[NOT]], %[[NOT_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 
 // LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp
index 26b4e4a8f7f8a..0edef8dee8702 100644
--- a/clang/test/CIR/CodeGen/vector.cpp
+++ b/clang/test/CIR/CodeGen/vector.cpp
@@ -321,10 +321,10 @@ void foo8() {
 // CIR: %[[TMP1:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[TMP1]], %[[PLUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP2:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[MINUS:.*]] = cir.unary(minus, %[[TMP2]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: %[[MINUS:.*]] = cir.minus %[[TMP2]] : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[MINUS]], %[[MINUS_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 // CIR: %[[TMP3:.*]] = cir.load{{.*}} %[[VEC]] : !cir.ptr<!cir.vector<4 x !s32i>>, !cir.vector<4 x !s32i>
-// CIR: %[[NOT:.*]] = cir.unary(not, %[[TMP3]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+// CIR: %[[NOT:.*]] = cir.not %[[TMP3]] : !cir.vector<4 x !s32i>
 // CIR: cir.store{{.*}} %[[NOT]], %[[NOT_RES]] : !cir.vector<4 x !s32i>, !cir.ptr<!cir.vector<4 x !s32i>>
 
 // LLVM: %[[VEC:.*]] = alloca <4 x i32>, i64 1, align 16
diff --git a/clang/test/CIR/CodeGen/vla.c b/clang/test/CIR/CodeGen/vla.c
index f30a438834660..f946f23504a7d 100644
--- a/clang/test/CIR/CodeGen/vla.c
+++ b/clang/test/CIR/CodeGen/vla.c
@@ -168,7 +168,7 @@ void f3(unsigned len) {
 // CIR:   cir.scope {
 // CIR:     cir.while {
 // CIR:     %[[CUR_I:.*]] = cir.load{{.*}} %[[I]]
-// CIR:     %[[NEXT:.*]] = cir.unary(inc, %[[CUR_I]])
+// CIR:     %[[NEXT:.*]] = cir.inc %[[CUR_I]]
 // CIR:     cir.store{{.*}} %[[NEXT]], %[[I]]
 // CIR:     %[[LEN2:.*]] = cir.load{{.*}} %[[LEN_ADDR]]
 // CIR:     %[[CMP:.*]] = cir.cmp(lt, %[[NEXT]], %[[LEN2]])
diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
index dbd8d6ba56dc0..787153853e75e 100644
--- a/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
+++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512bw-builtins.c
@@ -209,7 +209,7 @@ __mmask32 test_kandn_mask32(__mmask32 A, __mmask32 B) {
   // CIR-LABEL: _kandn_mask32
   // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.and {{.*}}, {{.*}} : !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<s, 1>> -> !u32i
 
@@ -233,7 +233,7 @@ __mmask64 test_kandn_mask64(__mmask64 A, __mmask64 B) {
   // CIR-LABEL: _kandn_mask64
   // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.and {{.*}}, {{.*}} : !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<s, 1>> -> !u64i
 
@@ -341,7 +341,7 @@ __mmask32 test_kxnor_mask32(__mmask32 A, __mmask32 B) {
   // CIR-LABEL: _kxnor_mask32
   // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.xor {{.*}}, {{.*}} : !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<s, 1>> -> !u32i
 
@@ -366,7 +366,7 @@ __mmask64 test_kxnor_mask64(__mmask64 A, __mmask64 B) {
   // CIR-LABEL: _kxnor_mask64
   // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.xor {{.*}}, {{.*}} : !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<s, 1>> -> !u64i
 
@@ -391,7 +391,7 @@ __mmask64 test_kxnor_mask64(__mmask64 A, __mmask64 B) {
 __mmask32 test_knot_mask32(__mmask32 A) {
   // CIR-LABEL: _knot_mask32
   // CIR: cir.cast bitcast {{.*}} : !u32i -> !cir.vector<32 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<32 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<32 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<32 x !cir.int<s, 1>> -> !u32i
 
   // LLVM-LABEL: _knot_mask32
@@ -409,7 +409,7 @@ __mmask32 test_knot_mask32(__mmask32 A) {
 __mmask64 test_knot_mask64(__mmask64 A) {
   // CIR-LABEL: _knot_mask64
   // CIR: cir.cast bitcast {{.*}} : !u64i -> !cir.vector<64 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<64 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<64 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<64 x !cir.int<s, 1>> -> !u64i
 
   // LLVM-LABEL: _knot_mask64
diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c
index 80ade8e71e304..d24e03865825a 100644
--- a/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c
+++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512dq-builtins.c
@@ -96,7 +96,7 @@ __mmask8 test_kandn_mask8(__mmask8 A, __mmask8 B) {
  // CIR-LABEL: _kandn_mask8
  // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int<s, 1>>
- // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int<s, 1>>
+ // CIR: cir.not {{.*}} : !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.and {{.*}}, {{.*}} : !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int<s, 1>> -> !u8i
 
@@ -163,7 +163,7 @@ __mmask8 test_kxnor_mask8(__mmask8 A, __mmask8 B) {
  // CIR-LABEL: _kxnor_mask8
  // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int<s, 1>>
- // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int<s, 1>>
+ // CIR: cir.not {{.*}} : !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.xor {{.*}}, {{.*}} : !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int<s, 1>> -> !u8i
 
@@ -187,7 +187,7 @@ __mmask8 test_kxnor_mask8(__mmask8 A, __mmask8 B) {
 __mmask8 test_knot_mask8(__mmask8 A) {
  // CIR-LABEL: _knot_mask8
  // CIR: cir.cast bitcast {{.*}} : !u8i -> !cir.vector<8 x !cir.int<s, 1>>
- // CIR: cir.unary(not, {{.*}}) : !cir.vector<8 x !cir.int<s, 1>>
+ // CIR: cir.not {{.*}} : !cir.vector<8 x !cir.int<s, 1>>
  // CIR: cir.cast bitcast {{.*}} : !cir.vector<8 x !cir.int<s, 1>> -> !u8i
 
  // LLVM-LABEL: _knot_mask8
diff --git a/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c
index 6109b696c045b..ccb636971255a 100644
--- a/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c
+++ b/clang/test/CIR/CodeGenBuiltins/X86/avx512f-builtins.c
@@ -199,7 +199,7 @@ __mmask16 test_mm512_kandn(__mmask16 A, __mmask16 B) {
   // CIR-LABEL: _mm512_kandn
   // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.and {{.*}}, {{.*}} : !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int<s, 1>> -> !u16i
 
@@ -244,7 +244,7 @@ __mmask16 test_mm512_kxnor(__mmask16 A, __mmask16 B) {
   // CIR-LABEL: _mm512_kxnor
   // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.xor {{.*}}, {{.*}} : !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int<s, 1>> -> !u16i
 
@@ -288,7 +288,7 @@ __mmask16 test_mm512_kxor(__mmask16 A, __mmask16 B) {
 __mmask16 test_mm512_knot(__mmask16 A) {
   // CIR-LABEL: _mm512_knot
   // CIR: cir.cast bitcast {{.*}} : !u16i -> !cir.vector<16 x !cir.int<s, 1>>
-  // CIR: cir.unary(not, {{.*}}) : !cir.vector<16 x !cir.int<s, 1>>
+  // CIR: cir.not {{.*}} : !cir.vector<16 x !cir.int<s, 1>>
   // CIR: cir.cast bitcast {{.*}} : !cir.vector<16 x !cir.int<s, 1>> -> !u16i
 
   // LLVM-LABEL: _mm512_knot
diff --git a/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c b/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c
index aa7f8d7c6a950..cc14f2e2aa405 100644
--- a/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c
+++ b/clang/test/CIR/CodeGenBuiltins/builtin-fcmp-sse.c
@@ -19,7 +19,7 @@ __m128 test_cmpnleps(__m128 A, __m128 B) {
   // CIR:           %[[LOAD_0:.*]] = cir.load align(16) %[[ALLOCA_0]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float>
   // CIR:           %[[LOAD_1:.*]] = cir.load align(16) %[[ALLOCA_1]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float>
   // CIR:           %[[VEC_0:.*]] = cir.vec.cmp(le, %[[LOAD_0]], %[[LOAD_1]]) : !cir.vector<4 x !cir.float>, !cir.vector<4 x !s32i>
-  // CIR:           %[[UNARY_0:.*]] = cir.unary(not, %[[VEC_0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>
+  // CIR:           %[[UNARY_0:.*]] = cir.not %[[VEC_0]] : !cir.vector<4 x !s32i>
   // CIR:           %[[CAST_0:.*]] = cir.cast bitcast %[[UNARY_0]] : !cir.vector<4 x !s32i> -> !cir.vector<4 x !cir.float>
   // CIR:           cir.store %[[CAST_0]], %[[ALLOCA_2]] : !cir.vector<4 x !cir.float>, !cir.ptr<!cir.vector<4 x !cir.float>>
   // CIR:           %[[LOAD_2:.*]] = cir.load %[[ALLOCA_2]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float>
@@ -70,7 +70,7 @@ __m128d test_cmpnlepd(__m128d A, __m128d B) {
   // CIR:           %[[LOAD_0:.*]] = cir.load align(16) %[[ALLOCA_0]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
   // CIR:           %[[LOAD_1:.*]] = cir.load align(16) %[[ALLOCA_1]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
   // CIR:           %[[VEC_0:.*]] = cir.vec.cmp(le, %[[LOAD_0]], %[[LOAD_1]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i> 
-  // CIR:           %[[UNARY_0:.*]] = cir.unary(not, %[[VEC_0]]) : !cir.vector<2 x !s64i>, !cir.vector<2 x !s64i> 
+  // CIR:           %[[UNARY_0:.*]] = cir.not %[[VEC_0]] : !cir.vector<2 x !s64i> 
   // CIR:           %[[CAST_0:.*]] = cir.cast bitcast %[[UNARY_0]] : !cir.vector<2 x !s64i> -> !cir.vector<2 x !cir.double> 
   // CIR:           cir.store %[[CAST_0]], %[[ALLOCA_2]] : !cir.vector<2 x !cir.double>, !cir.ptr<!cir.vector<2 x !cir.double>> 
   // CIR:           %[[LOAD_2:.*]] = cir.load %[[ALLOCA_2]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
@@ -121,7 +121,7 @@ __m128 test_cmpnltps(__m128 A, __m128 B) {
   // CIR:           %[[LOAD_0:.*]] = cir.load align(16) %[[ALLOCA_0]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float> 
   // CIR:           %[[LOAD_1:.*]] = cir.load align(16) %[[ALLOCA_1]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float> 
   // CIR:           %[[VEC_0:.*]] = cir.vec.cmp(lt, %[[LOAD_0]], %[[LOAD_1]]) : !cir.vector<4 x !cir.float>, !cir.vector<4 x !s32i> 
-  // CIR:           %[[UNARY_0:.*]] = cir.unary(not, %[[VEC_0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i> 
+  // CIR:           %[[UNARY_0:.*]] = cir.not %[[VEC_0]] : !cir.vector<4 x !s32i> 
   // CIR:           %[[CAST_0:.*]] = cir.cast bitcast %[[UNARY_0]] : !cir.vector<4 x !s32i> -> !cir.vector<4 x !cir.float> 
   // CIR:           cir.store %[[CAST_0]], %[[ALLOCA_2]] : !cir.vector<4 x !cir.float>, !cir.ptr<!cir.vector<4 x !cir.float>> 
   // CIR:           %[[LOAD_2:.*]] = cir.load %[[ALLOCA_2]] : !cir.ptr<!cir.vector<4 x !cir.float>>, !cir.vector<4 x !cir.float> 
@@ -172,7 +172,7 @@ __m128d test_cmpnltpd(__m128d A, __m128d B) {
   // CIR:           %[[LOAD_0:.*]] = cir.load align(16) %[[ALLOCA_0]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
   // CIR:           %[[LOAD_1:.*]] = cir.load align(16) %[[ALLOCA_1]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
   // CIR:           %[[VEC_0:.*]] = cir.vec.cmp(lt, %[[LOAD_0]], %[[LOAD_1]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i> 
-  // CIR:           %[[UNARY_0:.*]] = cir.unary(not, %[[VEC_0]]) : !cir.vector<2 x !s64i>, !cir.vector<2 x !s64i> 
+  // CIR:           %[[UNARY_0:.*]] = cir.not %[[VEC_0]] : !cir.vector<2 x !s64i> 
   // CIR:           %[[CAST_0:.*]] = cir.cast bitcast %[[UNARY_0]] : !cir.vector<2 x !s64i> -> !cir.vector<2 x !cir.double> 
   // CIR:           cir.store %[[CAST_0]], %[[ALLOCA_2]] : !cir.vector<2 x !cir.double>, !cir.ptr<!cir.vector<2 x !cir.double>> 
   // CIR:           %[[LOAD_2:.*]] = cir.load %[[ALLOCA_2]] : !cir.ptr<!cir.vector<2 x !cir.double>>, !cir.vector<2 x !cir.double> 
diff --git a/clang/test/CIR/CodeGenBuiltins/builtin-types-compatible.c b/clang/test/CIR/CodeGenBuiltins/builtin-types-compatible.c
index dcf5fd4246481..85135ac66a5b3 100644
--- a/clang/test/CIR/CodeGenBuiltins/builtin-types-compatible.c
+++ b/clang/test/CIR/CodeGenBuiltins/builtin-types-compatible.c
@@ -30,7 +30,7 @@ int test_convert_bool_to_int() {
 // CIR: cir.func {{.*}} @test_convert_bool_to_int()
 // CIR:   %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
 // CIR:   %[[BOOL:.*]] = cir.cast int_to_bool %[[ONE]] : !s32i -> !cir.bool
-// CIR:   %[[NOT:.*]] = cir.unary(not, %[[BOOL]]) : !cir.bool, !cir.bool
+// CIR:   %[[NOT:.*]] = cir.not %[[BOOL]] : !cir.bool
 // CIR:   cir.if %[[NOT]] {
 // CIR:     %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s32i
 // CIR:     cir.store %[[NEG_ONE]], %[[RETVAL:.*]]
diff --git a/clang/test/CIR/CodeGenOpenACC/atomic-capture.cpp b/clang/test/CIR/CodeGenOpenACC/atomic-capture.cpp
index 258d3ded865c8..bf6b1c57ff039 100644
--- a/clang/test/CIR/CodeGenOpenACC/atomic-capture.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/atomic-capture.cpp
@@ -30,7 +30,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -47,7 +47,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -66,7 +66,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -83,7 +83,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   // 
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -346,7 +346,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -367,7 +367,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -387,7 +387,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -408,7 +408,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[INC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -430,7 +430,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -451,7 +451,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -471,7 +471,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
@@ -492,7 +492,7 @@ void use(int x, int v, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[X_VAR]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[X_VAR_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[DEC:.*]] = cir.dec nsw %[[X_VAR_LOAD]] : !s32i
   // CHECK-NEXT: cir.store{{.*}} %[[DEC]], %[[X_VAR_ALLOC]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[X_VAR_LOAD:.*]] = cir.load{{.*}} %[[X_VAR_ALLOC]] : !cir.ptr<!s32i>, !s32i
diff --git a/clang/test/CIR/CodeGenOpenACC/atomic-update.cpp b/clang/test/CIR/CodeGenOpenACC/atomic-update.cpp
index 9ee49f36e86bf..3ef98cd757a67 100644
--- a/clang/test/CIR/CodeGenOpenACC/atomic-update.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/atomic-update.cpp
@@ -22,7 +22,7 @@ void use(int x, unsigned int y, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[RECIPE_ARG]], %[[TEMP_ALLOCA]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!s32i>, !s32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[TEMP_LOAD]]) nsw : !s32i, !s32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc nsw %[[TEMP_LOAD]] : !s32i
   // CHECK-NEXT: cir.store {{.*}}%[[INC]], %[[TEMP_ALLOCA]] : !s32i, !cir.ptr<!s32i>
   //
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!s32i>, !s32i
@@ -37,7 +37,7 @@ void use(int x, unsigned int y, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[RECIPE_ARG]], %[[TEMP_ALLOCA]] : !u32i, !cir.ptr<!u32i>
   //
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!u32i>, !u32i
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[TEMP_LOAD]]) : !u32i, !u32i
+  // CHECK-NEXT: %[[INC:.*]] = cir.inc %[[TEMP_LOAD]] : !u32i
   // CHECK-NEXT: cir.store {{.*}}%[[INC]], %[[TEMP_ALLOCA]] : !u32i, !cir.ptr<!u32i>
   // 
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!u32i>, !u32i
@@ -52,7 +52,7 @@ void use(int x, unsigned int y, float f, HasOps ops) {
   // CHECK-NEXT: cir.store %[[RECIPE_ARG]], %[[TEMP_ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
   //
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!cir.float>, !cir.float
-  // CHECK-NEXT: %[[INC:.*]] = cir.unary(dec, %[[TEMP_LOAD]]) : !cir.float, !cir.float
+  // CHECK-NEXT: %[[INC:.*]] = cir.dec %[[TEMP_LOAD]] : !cir.float
   // CHECK-NEXT: cir.store {{.*}}%[[INC]], %[[TEMP_ALLOCA]] : !cir.float, !cir.ptr<!cir.float>
   // 
   // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load{{.*}} %[[TEMP_ALLOCA]] : !cir.ptr<!cir.float>, !cir.float
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
index 98b6f70c97a22..15a233c2ed638 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp
@@ -109,7 +109,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -145,7 +145,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -180,7 +180,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -215,7 +215,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -250,7 +250,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -285,7 +285,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -314,7 +314,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
index e2545a81bb75b..2050b7f4263ae 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp
@@ -113,7 +113,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -149,7 +149,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
index ee7bdf2b35c04..39725e493b5b6 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
@@ -616,7 +616,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -685,7 +685,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -793,7 +793,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -900,7 +900,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -957,7 +957,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1014,7 +1014,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1071,7 +1071,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1178,7 +1178,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1286,7 +1286,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1332,7 +1332,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1393,7 +1393,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1438,7 +1438,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1499,7 +1499,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1544,7 +1544,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1647,7 +1647,7 @@ void acc_combined() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1692,7 +1692,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1795,7 +1795,7 @@ void acc_combined() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1834,7 +1834,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1922,7 +1922,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1971,7 +1971,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2010,7 +2010,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2104,7 +2104,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2205,7 +2205,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2250,7 +2250,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2351,7 +2351,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
index d06fd9c98e69e..6f8735c668e09 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
@@ -166,7 +166,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -204,7 +204,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -249,7 +249,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -294,7 +294,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -342,7 +342,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -390,7 +390,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -454,7 +454,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -486,7 +486,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -518,7 +518,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -550,7 +550,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -589,7 +589,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -621,7 +621,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -660,7 +660,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -692,7 +692,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -734,7 +734,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -766,7 +766,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -808,7 +808,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
index bca98cbcbdfae..bb9714f6b1788 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
@@ -376,7 +376,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -520,7 +520,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -669,7 +669,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -818,7 +818,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -961,7 +961,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1046,7 +1046,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1130,7 +1130,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1277,7 +1277,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1365,7 +1365,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1430,7 +1430,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1459,7 +1459,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1488,7 +1488,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1533,7 +1533,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1562,7 +1562,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1591,7 +1591,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1636,7 +1636,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1671,7 +1671,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1700,7 +1700,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1745,7 +1745,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1780,7 +1780,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1809,7 +1809,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1854,7 +1854,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1912,7 +1912,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1957,7 +1957,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1986,7 +1986,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2015,7 +2015,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2060,7 +2060,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2089,7 +2089,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2118,7 +2118,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2163,7 +2163,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2195,7 +2195,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2224,7 +2224,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2269,7 +2269,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2301,7 +2301,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2330,7 +2330,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
index 8e09bbed1ad00..cb03a459dbf35 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
@@ -218,7 +218,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -256,7 +256,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -301,7 +301,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -346,7 +346,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -384,7 +384,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -460,7 +460,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -508,7 +508,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -556,7 +556,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -588,7 +588,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -620,7 +620,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -652,7 +652,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -684,7 +684,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -716,7 +716,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -755,7 +755,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -787,7 +787,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -826,7 +826,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -858,7 +858,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -890,7 +890,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -922,7 +922,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -954,7 +954,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -986,7 +986,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1018,7 +1018,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1050,7 +1050,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1092,7 +1092,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1124,7 +1124,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1166,7 +1166,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
index a70e6d7fee43a..c4514fbfaef56 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
@@ -375,7 +375,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -519,7 +519,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -668,7 +668,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -817,7 +817,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -960,7 +960,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1045,7 +1045,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1129,7 +1129,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1276,7 +1276,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1364,7 +1364,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1429,7 +1429,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1458,7 +1458,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1487,7 +1487,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1532,7 +1532,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1561,7 +1561,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1590,7 +1590,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1635,7 +1635,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1670,7 +1670,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1699,7 +1699,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1744,7 +1744,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1779,7 +1779,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1808,7 +1808,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1853,7 +1853,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1882,7 +1882,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1911,7 +1911,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1956,7 +1956,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1985,7 +1985,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2014,7 +2014,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2088,7 +2088,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2117,7 +2117,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2162,7 +2162,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2194,7 +2194,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2223,7 +2223,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2268,7 +2268,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2300,7 +2300,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2329,7 +2329,7 @@ void acc_combined() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
index 2631f73822c2d..c0de9a31aefcf 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c
@@ -62,7 +62,7 @@ struct NoCopyConstruct {};
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -98,7 +98,7 @@ struct NoCopyConstruct {};
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -133,7 +133,7 @@ struct NoCopyConstruct {};
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
index fdeb279370473..a56b56c46999a 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp
@@ -110,7 +110,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -146,7 +146,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -181,7 +181,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -216,7 +216,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -251,7 +251,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -286,7 +286,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -315,7 +315,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
index 3fac928a45389..48a6d46074737 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp
@@ -102,7 +102,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -137,7 +137,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
index 1ac2d7b26686c..052647411f439 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
@@ -636,7 +636,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -705,7 +705,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -816,7 +816,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -927,7 +927,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -984,7 +984,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1041,7 +1041,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1098,7 +1098,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1208,7 +1208,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1319,7 +1319,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1365,7 +1365,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1426,7 +1426,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1471,7 +1471,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1532,7 +1532,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1577,7 +1577,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1684,7 +1684,7 @@ void acc_compute() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1729,7 +1729,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1836,7 +1836,7 @@ void acc_compute() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1875,7 +1875,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1924,7 +1924,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1963,7 +1963,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2012,7 +2012,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2051,7 +2051,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2100,7 +2100,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2145,7 +2145,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2249,7 +2249,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2294,7 +2294,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2398,7 +2398,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
index bd6dc238c9308..9891fd858a986 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -616,7 +616,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -685,7 +685,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -793,7 +793,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -900,7 +900,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -957,7 +957,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1014,7 +1014,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1071,7 +1071,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1178,7 +1178,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1286,7 +1286,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1332,7 +1332,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1393,7 +1393,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1438,7 +1438,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1499,7 +1499,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1544,7 +1544,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1647,7 +1647,7 @@ void acc_compute() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1692,7 +1692,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1795,7 +1795,7 @@ void acc_compute() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1834,7 +1834,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1922,7 +1922,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1971,7 +1971,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2010,7 +2010,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2104,7 +2104,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2205,7 +2205,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2250,7 +2250,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2351,7 +2351,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
index a44592471fa3f..d9406986b6b88 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
@@ -167,7 +167,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -205,7 +205,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -250,7 +250,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -295,7 +295,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -344,7 +344,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -393,7 +393,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -425,7 +425,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -457,7 +457,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -489,7 +489,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -521,7 +521,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -553,7 +553,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -592,7 +592,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -624,7 +624,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -663,7 +663,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -695,7 +695,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -738,7 +738,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -770,7 +770,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -813,7 +813,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
index aebcf23a582ad..e838eaf5049e3 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
@@ -166,7 +166,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -204,7 +204,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -249,7 +249,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -294,7 +294,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -342,7 +342,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -390,7 +390,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -454,7 +454,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -486,7 +486,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -518,7 +518,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -550,7 +550,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -589,7 +589,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -621,7 +621,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -660,7 +660,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -692,7 +692,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -734,7 +734,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -766,7 +766,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -808,7 +808,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
index f8003f8bb0c79..39beed64b12fd 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
@@ -376,7 +376,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -520,7 +520,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -669,7 +669,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -818,7 +818,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -961,7 +961,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1046,7 +1046,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1130,7 +1130,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1277,7 +1277,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1365,7 +1365,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1430,7 +1430,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1459,7 +1459,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1488,7 +1488,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1533,7 +1533,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1562,7 +1562,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1591,7 +1591,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1636,7 +1636,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1671,7 +1671,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1700,7 +1700,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1745,7 +1745,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1780,7 +1780,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1809,7 +1809,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1854,7 +1854,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1912,7 +1912,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1957,7 +1957,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1986,7 +1986,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2015,7 +2015,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2060,7 +2060,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2089,7 +2089,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2118,7 +2118,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2163,7 +2163,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2195,7 +2195,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2224,7 +2224,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2269,7 +2269,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2301,7 +2301,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2330,7 +2330,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
index 3f1362ab3716e..048835eab8f02 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
@@ -217,7 +217,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -255,7 +255,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -300,7 +300,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -345,7 +345,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -383,7 +383,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -421,7 +421,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -459,7 +459,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -507,7 +507,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -555,7 +555,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -587,7 +587,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -619,7 +619,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -651,7 +651,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -683,7 +683,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -715,7 +715,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -754,7 +754,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -786,7 +786,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -825,7 +825,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -857,7 +857,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -889,7 +889,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -921,7 +921,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -953,7 +953,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -985,7 +985,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1017,7 +1017,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1049,7 +1049,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1091,7 +1091,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1123,7 +1123,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1165,7 +1165,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
index 11c569f5d817c..2aac90c6b292a 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
@@ -218,7 +218,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -256,7 +256,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -301,7 +301,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -346,7 +346,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -384,7 +384,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -460,7 +460,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -508,7 +508,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -556,7 +556,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -588,7 +588,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -620,7 +620,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -652,7 +652,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -684,7 +684,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -716,7 +716,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -755,7 +755,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -787,7 +787,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -826,7 +826,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -858,7 +858,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -890,7 +890,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -922,7 +922,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -954,7 +954,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -986,7 +986,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1018,7 +1018,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1050,7 +1050,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1092,7 +1092,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1124,7 +1124,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1166,7 +1166,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
index 63538de3dd8f1..826bf65ddf742 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
@@ -375,7 +375,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -519,7 +519,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -668,7 +668,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -817,7 +817,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -960,7 +960,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1045,7 +1045,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1129,7 +1129,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1276,7 +1276,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1364,7 +1364,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1429,7 +1429,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1458,7 +1458,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1487,7 +1487,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1532,7 +1532,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1561,7 +1561,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1590,7 +1590,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1635,7 +1635,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1670,7 +1670,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1699,7 +1699,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1744,7 +1744,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1779,7 +1779,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1808,7 +1808,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1853,7 +1853,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1882,7 +1882,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1911,7 +1911,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1956,7 +1956,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1985,7 +1985,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2014,7 +2014,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2088,7 +2088,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2117,7 +2117,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2162,7 +2162,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2194,7 +2194,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2223,7 +2223,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2268,7 +2268,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2300,7 +2300,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2329,7 +2329,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
index ef10723325ecc..05638d1faaaeb 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
@@ -218,7 +218,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -256,7 +256,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -301,7 +301,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -346,7 +346,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -384,7 +384,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -460,7 +460,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -509,7 +509,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -558,7 +558,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -590,7 +590,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -622,7 +622,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -654,7 +654,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -686,7 +686,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -718,7 +718,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -757,7 +757,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -789,7 +789,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -828,7 +828,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -860,7 +860,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -892,7 +892,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -924,7 +924,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -956,7 +956,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -988,7 +988,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1020,7 +1020,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1052,7 +1052,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1095,7 +1095,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1127,7 +1127,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1170,7 +1170,7 @@ void acc_compute() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/data.c b/clang/test/CIR/CodeGenOpenACC/data.c
index 4e13f17f4bfd7..e1267b107a280 100644
--- a/clang/test/CIR/CodeGenOpenACC/data.c
+++ b/clang/test/CIR/CodeGenOpenACC/data.c
@@ -18,7 +18,7 @@ void acc_data(int cond) {
   // CHECK-NEXT: cir.const
   // CHECK-NEXT: cir.store
   // CHECK-NEXT: cir.load
-  // CHECK-NEXT: cir.unary
+  // CHECK-NEXT: cir.inc
   // CHECK-NEXT: cir.store
   // CHECK-NEXT: acc.terminator
   // CHECK-NEXT: } attributes {defaultAttr = #acc<defaultvalue none>}
@@ -33,7 +33,7 @@ void acc_data(int cond) {
   // CHECK-NEXT: cir.const
   // CHECK-NEXT: cir.store
   // CHECK-NEXT: cir.load
-  // CHECK-NEXT: cir.unary
+  // CHECK-NEXT: cir.inc
   // CHECK-NEXT: cir.store
   // CHECK-NEXT: acc.terminator
   // CHECK-NEXT: } attributes {defaultAttr = #acc<defaultvalue present>}
diff --git a/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp b/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp
index ecf091ccfb269..8b3cabe24ef0b 100644
--- a/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/firstprivate-clause-recipes.cpp
@@ -79,7 +79,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -87,7 +87,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -95,7 +95,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -158,7 +158,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -166,7 +166,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -174,7 +174,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -214,7 +214,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -248,7 +248,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -281,7 +281,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -346,7 +346,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -354,7 +354,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -362,7 +362,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -427,7 +427,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -435,7 +435,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -443,7 +443,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -483,7 +483,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -521,7 +521,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -584,7 +584,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -592,7 +592,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -600,7 +600,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -664,7 +664,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -672,7 +672,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -680,7 +680,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
index df27896da217a..fc645f4bb2852 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp
@@ -113,7 +113,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -149,7 +149,7 @@ struct HasDtor {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
index fc6b6f8d5a07e..3d449d48bbfbf 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
@@ -616,7 +616,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -685,7 +685,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -793,7 +793,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -900,7 +900,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -957,7 +957,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1014,7 +1014,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1071,7 +1071,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1178,7 +1178,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1286,7 +1286,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1332,7 +1332,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1393,7 +1393,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1438,7 +1438,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1499,7 +1499,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1544,7 +1544,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1647,7 +1647,7 @@ void acc_loop() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1692,7 +1692,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1795,7 +1795,7 @@ void acc_loop() {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1834,7 +1834,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1922,7 +1922,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1971,7 +1971,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2010,7 +2010,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2104,7 +2104,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2205,7 +2205,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2250,7 +2250,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2351,7 +2351,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
index 4a28c59e8d11f..89830d0d68007 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
@@ -166,7 +166,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -204,7 +204,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -249,7 +249,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -294,7 +294,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -342,7 +342,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -390,7 +390,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -454,7 +454,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -486,7 +486,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -518,7 +518,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -550,7 +550,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -589,7 +589,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -621,7 +621,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -660,7 +660,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -692,7 +692,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -734,7 +734,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -766,7 +766,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -808,7 +808,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
index f38fa068e9f06..5af35bacaac92 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
@@ -376,7 +376,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -520,7 +520,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -669,7 +669,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -818,7 +818,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -961,7 +961,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1046,7 +1046,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1130,7 +1130,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1277,7 +1277,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1365,7 +1365,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1430,7 +1430,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1459,7 +1459,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1488,7 +1488,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1533,7 +1533,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1562,7 +1562,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1591,7 +1591,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1636,7 +1636,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1671,7 +1671,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1700,7 +1700,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1745,7 +1745,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1780,7 +1780,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1809,7 +1809,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1854,7 +1854,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1883,7 +1883,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1912,7 +1912,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1957,7 +1957,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1986,7 +1986,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2015,7 +2015,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2060,7 +2060,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2089,7 +2089,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2118,7 +2118,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2163,7 +2163,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2195,7 +2195,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2224,7 +2224,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2269,7 +2269,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2301,7 +2301,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2330,7 +2330,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
index d0c138a5647e6..eef4f79cd90b7 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -218,7 +218,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -256,7 +256,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -301,7 +301,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -346,7 +346,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -384,7 +384,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -422,7 +422,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -460,7 +460,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -508,7 +508,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -556,7 +556,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -588,7 +588,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -620,7 +620,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -652,7 +652,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -684,7 +684,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -716,7 +716,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -755,7 +755,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -787,7 +787,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -826,7 +826,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -858,7 +858,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -890,7 +890,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -922,7 +922,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -954,7 +954,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -986,7 +986,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1018,7 +1018,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1050,7 +1050,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1092,7 +1092,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1124,7 +1124,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1166,7 +1166,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
index 3dc1596be2da1..ef6f39ebe79e5 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
@@ -375,7 +375,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -519,7 +519,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -668,7 +668,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -817,7 +817,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -960,7 +960,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1045,7 +1045,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1129,7 +1129,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1276,7 +1276,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1364,7 +1364,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !s64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1429,7 +1429,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1458,7 +1458,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1487,7 +1487,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1532,7 +1532,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1561,7 +1561,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1590,7 +1590,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1635,7 +1635,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1670,7 +1670,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1699,7 +1699,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1744,7 +1744,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1779,7 +1779,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1808,7 +1808,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1853,7 +1853,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1882,7 +1882,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1911,7 +1911,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1956,7 +1956,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1985,7 +1985,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2014,7 +2014,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2059,7 +2059,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2088,7 +2088,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2117,7 +2117,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2162,7 +2162,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2194,7 +2194,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2223,7 +2223,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2268,7 +2268,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2300,7 +2300,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -2329,7 +2329,7 @@ void acc_loop() {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
index 2803aa1db0d7d..4402a274e9207 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp
@@ -36,7 +36,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -66,7 +66,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -164,7 +164,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -172,7 +172,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -221,7 +221,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -229,7 +229,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -344,7 +344,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -352,7 +352,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -360,7 +360,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -423,7 +423,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -431,7 +431,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -439,7 +439,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -511,7 +511,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -519,7 +519,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -584,7 +584,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -592,7 +592,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
index 3c14cad32e69f..38f478a6e2592 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp
@@ -31,7 +31,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -110,7 +110,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -118,7 +118,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -212,7 +212,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -220,7 +220,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -228,7 +228,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -300,7 +300,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -308,7 +308,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
index 59a5a9f73531e..c6409f377b4b1 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp
@@ -39,7 +39,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -65,7 +65,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -96,7 +96,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -145,7 +145,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -175,7 +175,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -216,7 +216,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -224,7 +224,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -270,7 +270,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -278,7 +278,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -328,7 +328,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -358,7 +358,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -388,7 +388,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -445,7 +445,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -453,7 +453,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -461,7 +461,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -525,7 +525,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -533,7 +533,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -541,7 +541,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -584,7 +584,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -614,7 +614,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -672,7 +672,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -714,7 +714,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -722,7 +722,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -769,7 +769,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -777,7 +777,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -828,7 +828,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -869,7 +869,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -877,7 +877,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -923,7 +923,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -931,7 +931,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -988,7 +988,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1018,7 +1018,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1074,7 +1074,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1082,7 +1082,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1090,7 +1090,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1154,7 +1154,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1162,7 +1162,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1170,7 +1170,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1220,7 +1220,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1270,7 +1270,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1300,7 +1300,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1356,7 +1356,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1364,7 +1364,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1372,7 +1372,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1436,7 +1436,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1444,7 +1444,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1452,7 +1452,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1495,7 +1495,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1524,7 +1524,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1581,7 +1581,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1589,7 +1589,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1654,7 +1654,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1662,7 +1662,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1714,7 +1714,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1752,7 +1752,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1807,7 +1807,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1815,7 +1815,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1823,7 +1823,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1887,7 +1887,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1895,7 +1895,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1903,7 +1903,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1946,7 +1946,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
index 786b8e6ef9d41..4fba35dea8fcf 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp
@@ -34,7 +34,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -60,7 +60,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -109,7 +109,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -139,7 +139,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -179,7 +179,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -187,7 +187,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -237,7 +237,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -267,7 +267,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -297,7 +297,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -353,7 +353,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -361,7 +361,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -369,7 +369,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -412,7 +412,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -442,7 +442,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -500,7 +500,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -541,7 +541,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -549,7 +549,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -602,7 +602,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -642,7 +642,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -650,7 +650,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -706,7 +706,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -736,7 +736,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -791,7 +791,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -799,7 +799,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -807,7 +807,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -857,7 +857,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -907,7 +907,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -937,7 +937,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -992,7 +992,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1000,7 +1000,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1008,7 +1008,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1051,7 +1051,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1081,7 +1081,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1137,7 +1137,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1145,7 +1145,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1197,7 +1197,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1235,7 +1235,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -1290,7 +1290,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1298,7 +1298,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1306,7 +1306,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -1349,7 +1349,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
index 062b3d11e3b5a..e5ed6ecd03a12 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp
@@ -32,7 +32,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -81,7 +81,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -111,7 +111,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -161,7 +161,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -191,7 +191,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -220,7 +220,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -263,7 +263,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -293,7 +293,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -350,7 +350,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -401,7 +401,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -458,7 +458,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -488,7 +488,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -538,7 +538,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -588,7 +588,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -619,7 +619,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -662,7 +662,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -692,7 +692,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -744,7 +744,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -782,7 +782,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -826,7 +826,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
index dad30e056bca2..fdccb3fcc04e1 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp
@@ -51,7 +51,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -91,7 +91,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -124,7 +124,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -169,7 +169,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -203,7 +203,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -236,7 +236,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -292,7 +292,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -300,7 +300,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -308,7 +308,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -373,7 +373,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -381,7 +381,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -389,7 +389,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -444,7 +444,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -483,7 +483,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -516,7 +516,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -560,7 +560,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -568,7 +568,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -617,7 +617,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -625,7 +625,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -679,7 +679,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -705,7 +705,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -735,7 +735,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
index 8f13d894c6b43..e88b04be9cdcb 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp
@@ -45,7 +45,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -84,7 +84,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -118,7 +118,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -161,7 +161,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -195,7 +195,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -229,7 +229,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -286,7 +286,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -294,7 +294,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -302,7 +302,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -359,7 +359,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -398,7 +398,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -431,7 +431,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -475,7 +475,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -483,7 +483,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -537,7 +537,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -563,7 +563,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
index 03c685427ad51..e881e9fedcf08 100644
--- a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp
@@ -43,7 +43,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -83,7 +83,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -115,7 +115,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -159,7 +159,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -193,7 +193,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -225,7 +225,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -282,7 +282,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -322,7 +322,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -355,7 +355,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -410,7 +410,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp b/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp
index b41a6b28b5395..5154620d2d3c8 100644
--- a/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/reduction-clause-recipes.cpp
@@ -67,7 +67,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -75,7 +75,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -83,7 +83,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -151,7 +151,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -159,7 +159,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -167,7 +167,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -230,7 +230,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -238,7 +238,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -246,7 +246,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -287,7 +287,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -321,7 +321,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -354,7 +354,7 @@ void do_things(unsigned A, unsigned B) {
 //
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -412,7 +412,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -420,7 +420,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -428,7 +428,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -498,7 +498,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -506,7 +506,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -514,7 +514,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -578,7 +578,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -586,7 +586,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -594,7 +594,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -635,7 +635,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield 
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -673,7 +673,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i 
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR_LOAD]] : !u64i 
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i> 
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } 
@@ -730,7 +730,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -738,7 +738,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -746,7 +746,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -822,7 +822,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -830,7 +830,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -838,7 +838,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.inc %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -902,7 +902,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR1_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -910,7 +910,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR2_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
@@ -918,7 +918,7 @@ void do_things(unsigned A, unsigned B) {
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: } step {
 // CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr<!u64i>, !u64i
-// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: %[[DEC:.*]] = cir.dec %[[ITR3_LOAD]] : !u64i
 // CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr<!u64i>
 // CHECK-NEXT: cir.yield
 // CHECK-NEXT: }
diff --git a/clang/test/CIR/IR/unary.cir b/clang/test/CIR/IR/unary.cir
index 0fdf1dcfdfb47..1ef1606807935 100644
--- a/clang/test/CIR/IR/unary.cir
+++ b/clang/test/CIR/IR/unary.cir
@@ -9,38 +9,38 @@ module {
   cir.func @test_unary_unsigned() {
     %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a"] {alignment = 4 : i64}
     %1 = cir.load %0 : !cir.ptr<!u32i>, !u32i
-    %2 = cir.unary(minus, %1) : !u32i, !u32i
-    %3 = cir.unary(not, %1) : !u32i, !u32i
-    %4 = cir.unary(inc, %1) : !u32i, !u32i
-    %5 = cir.unary(dec, %1) : !u32i, !u32i
+    %2 = cir.minus %1 : !u32i
+    %3 = cir.not %1 : !u32i
+    %4 = cir.inc %1 : !u32i
+    %5 = cir.dec %1 : !u32i
     cir.return
   }
 // CHECK: cir.func{{.*}} @test_unary_unsigned() {
 // CHECK:   %0 = cir.alloca !u32i, !cir.ptr<!u32i>, ["a"] {alignment = 4 : i64}
 // CHECK:   %1 = cir.load %0 : !cir.ptr<!u32i>, !u32i
-// CHECK:   %2 = cir.unary(minus, %1) : !u32i, !u32i
-// CHECK:   %3 = cir.unary(not, %1) : !u32i, !u32i
-// CHECK:   %4 = cir.unary(inc, %1) : !u32i, !u32i
-// CHECK:   %5 = cir.unary(dec, %1) : !u32i, !u32i
+// CHECK:   %2 = cir.minus %1 : !u32i
+// CHECK:   %3 = cir.not %1 : !u32i
+// CHECK:   %4 = cir.inc %1 : !u32i
+// CHECK:   %5 = cir.dec %1 : !u32i
 // CHECK:   cir.return
 // CHECK: }
 
   cir.func @test_unary_signed() {
     %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"] {alignment = 4 : i64}
     %1 = cir.load %0 : !cir.ptr<!s32i>, !s32i
-    %2 = cir.unary(minus, %1) nsw : !s32i, !s32i
-    %3 = cir.unary(not, %1) : !s32i, !s32i
-    %4 = cir.unary(inc, %1) nsw : !s32i, !s32i
-    %5 = cir.unary(dec, %1) nsw : !s32i, !s32i
+    %2 = cir.minus nsw %1 : !s32i
+    %3 = cir.not %1 : !s32i
+    %4 = cir.inc nsw %1 : !s32i
+    %5 = cir.dec nsw %1 : !s32i
     cir.return
   }
 // CHECK: cir.func{{.*}} @test_unary_signed() {
 // CHECK:   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a"] {alignment = 4 : i64}
 // CHECK:   %1 = cir.load %0 : !cir.ptr<!s32i>, !s32i
-// CHECK:   %2 = cir.unary(minus, %1) nsw : !s32i, !s32i
-// CHECK:   %3 = cir.unary(not, %1) : !s32i, !s32i
-// CHECK:   %4 = cir.unary(inc, %1) nsw : !s32i, !s32i
-// CHECK:   %5 = cir.unary(dec, %1) nsw : !s32i, !s32i
+// CHECK:   %2 = cir.minus nsw %1 : !s32i
+// CHECK:   %3 = cir.not %1 : !s32i
+// CHECK:   %4 = cir.inc nsw %1 : !s32i
+// CHECK:   %5 = cir.dec nsw %1 : !s32i
 // CHECK:   cir.return
 // CHECK: }
 }
diff --git a/clang/test/CIR/Transforms/canonicalize.cir b/clang/test/CIR/Transforms/canonicalize.cir
index 7476126818082..26493f7a37e39 100644
--- a/clang/test/CIR/Transforms/canonicalize.cir
+++ b/clang/test/CIR/Transforms/canonicalize.cir
@@ -64,8 +64,8 @@ module {
   // CHECK-NEXT: }
 
   cir.func @unary_not_not(%arg0: !cir.bool) -> !cir.bool {
-    %0 = cir.unary(not, %arg0) : !cir.bool, !cir.bool
-    %1 = cir.unary(not, %0) : !cir.bool, !cir.bool
+    %0 = cir.not %arg0 : !cir.bool
+    %1 = cir.not %0 : !cir.bool
     cir.return %1 : !cir.bool
   }
   // CHECK:      cir.func{{.*}} @unary_not_not(%arg0: !cir.bool) -> !cir.bool
@@ -73,7 +73,7 @@ module {
 
   cir.func @unary_poison() -> !s32i {
     %0 = cir.const #cir.poison : !s32i
-    %1 = cir.unary(inc, %0) : !s32i, !s32i
+    %1 = cir.inc %0 : !s32i
     cir.return %1 : !s32i
   }
   // CHECK:      @unary_poison
@@ -83,7 +83,7 @@ module {
 
   cir.func @unary_not_true() -> !cir.bool {
     %0 = cir.const #true
-    %1 = cir.unary(not, %0) : !cir.bool, !cir.bool
+    %1 = cir.not %0 : !cir.bool
     cir.return %1 : !cir.bool
   }
   // CHECK:      cir.func{{.*}} @unary_not_true() -> !cir.bool
@@ -92,7 +92,7 @@ module {
 
   cir.func @unary_not_false() -> !cir.bool {
     %0 = cir.const #false
-    %1 = cir.unary(not, %0) : !cir.bool, !cir.bool
+    %1 = cir.not %0 : !cir.bool
     cir.return %1 : !cir.bool
   }
   // CHECK:      cir.func{{.*}} @unary_not_false() -> !cir.bool
@@ -101,7 +101,7 @@ module {
 
   cir.func @unary_not_int() -> !s32i {
     %0 = cir.const #cir.int<1> : !s32i
-    %1 = cir.unary(not, %0) : !s32i, !s32i
+    %1 = cir.not %0 : !s32i
     cir.return %1 : !s32i
   }
   // CHECK:      cir.func{{.*}} @unary_not_int() -> !s32i
@@ -110,7 +110,7 @@ module {
 
   cir.func @unary_not_uint() -> !u32i {
     %0 = cir.const #cir.int<1> : !u32i
-    %1 = cir.unary(not, %0) : !u32i, !u32i
+    %1 = cir.not %0 : !u32i
     cir.return %1 : !u32i
   }
   // CHECK:      cir.func{{.*}} @unary_not_uint() -> !u32i
@@ -119,7 +119,7 @@ module {
 
   cir.func @unary_minus_true() -> !cir.bool {
     %0 = cir.const #true
-    %1 = cir.unary(minus, %0) : !cir.bool, !cir.bool
+    %1 = cir.minus %0 : !cir.bool
     cir.return %1 : !cir.bool
   }
   // CHECK:      cir.func{{.*}} @unary_minus_true() -> !cir.bool
@@ -128,7 +128,7 @@ module {
 
   cir.func @unary_minus_false() -> !cir.bool {
     %0 = cir.const #false
-    %1 = cir.unary(minus, %0) : !cir.bool, !cir.bool
+    %1 = cir.minus %0 : !cir.bool
     cir.return %1 : !cir.bool
   }
   // CHECK:      cir.func{{.*}} @unary_minus_false() -> !cir.bool
@@ -137,7 +137,7 @@ module {
 
   cir.func @unary_minus_int() -> !s32i {
     %0 = cir.const #cir.int<1> : !s32i
-    %1 = cir.unary(minus, %0) : !s32i, !s32i
+    %1 = cir.minus %0 : !s32i
     cir.return %1 : !s32i
   }
   // CHECK:      cir.func{{.*}} @unary_minus_int() -> !s32i
@@ -146,7 +146,7 @@ module {
 
   cir.func @unary_minus_uint() -> !u32i {
     %0 = cir.const #cir.int<1> : !u32i
-    %1 = cir.unary(minus, %0) : !u32i, !u32i
+    %1 = cir.minus %0 : !u32i
     cir.return %1 : !u32i
   }
   // CHECK:      cir.func{{.*}} @unary_minus_uint() -> !u32i
@@ -155,7 +155,7 @@ module {
 
   cir.func @unary_minus_float() -> !cir.float {
     %0 = cir.const #cir.fp<1.100000e+00> : !cir.float
-    %1 = cir.unary(minus, %0) : !cir.float, !cir.float
+    %1 = cir.minus %0 : !cir.float
     cir.return %1 : !cir.float
   }
   // CHECK:      cir.func{{.*}} @unary_minus_float() -> !cir.float
@@ -164,7 +164,7 @@ module {
 
   cir.func @unary_minus_double() -> !cir.double {
     %0 = cir.const #cir.fp<1.100000e+00> : !cir.double
-    %1 = cir.unary(minus, %0) : !cir.double, !cir.double
+    %1 = cir.minus %0 : !cir.double
     cir.return %1 : !cir.double
   }
   // CHECK:      cir.func{{.*}} @unary_minus_double() -> !cir.double
@@ -173,7 +173,7 @@ module {
 
   cir.func @unary_minus_nan() -> !cir.float {
     %0 = cir.const #cir.fp<0x7F800000> : !cir.float
-    %1 = cir.unary(minus, %0) : !cir.float, !cir.float
+    %1 = cir.minus %0 : !cir.float
     cir.return %1 : !cir.float
   }
   // CHECK:      cir.func{{.*}} @unary_minus_nan() -> !cir.float
@@ -182,7 +182,7 @@ module {
 
   cir.func @unary_minus_neg_nan() -> !cir.float {
     %0 = cir.const #cir.fp<0xFF800000> : !cir.float
-    %1 = cir.unary(minus, %0) : !cir.float, !cir.float
+    %1 = cir.minus %0 : !cir.float
     cir.return %1 : !cir.float
   }
   // CHECK:      cir.func{{.*}} @unary_minus_neg_nan() -> !cir.float
diff --git a/clang/test/CIR/Transforms/flatten-cleanup-scope-eh.cir b/clang/test/CIR/Transforms/flatten-cleanup-scope-eh.cir
index 35852eacebf34..9cd1170b4295d 100644
--- a/clang/test/CIR/Transforms/flatten-cleanup-scope-eh.cir
+++ b/clang/test/CIR/Transforms/flatten-cleanup-scope-eh.cir
@@ -247,7 +247,7 @@ cir.func @test_nrvo() -> !rec_NonTrivial {
     cir.yield
   } cleanup all {
     %4 = cir.load align(1) %1 : !cir.ptr<!cir.bool>, !cir.bool
-    %5 = cir.unary(not, %4) : !cir.bool, !cir.bool
+    %5 = cir.not %4 : !cir.bool
     cir.if %5 {
       cir.call @_ZN10NonTrivialD1Ev(%0) nothrow : (!cir.ptr<!rec_NonTrivial>) -> ()
     }
@@ -277,7 +277,7 @@ cir.func @test_nrvo() -> !rec_NonTrivial {
 // Normal cleanup: inlined cleanup region with flattened cir.if.
 // CHECK:       ^[[NORMAL_CLEANUP]]:
 // CHECK:         %[[N_FLAG:.*]] = cir.load align(1) %[[NRVO]] : !cir.ptr<!cir.bool>, !cir.bool
-// CHECK:         %[[N_NOT:.*]] = cir.unary(not, %[[N_FLAG]]) : !cir.bool, !cir.bool
+// CHECK:         %[[N_NOT:.*]] = cir.not %[[N_FLAG]] : !cir.bool
 // CHECK:         cir.brcond %[[N_NOT]] ^[[N_IF_TRUE:bb[0-9]+]], ^[[N_MERGE:bb[0-9]+]]
 //
 // CHECK:       ^[[N_IF_TRUE]]:
@@ -299,7 +299,7 @@ cir.func @test_nrvo() -> !rec_NonTrivial {
 // CHECK:       ^[[EH_CLEANUP]](%[[ET:.*]]: !cir.eh_token):
 // CHECK:         %[[CT:.*]] = cir.begin_cleanup %[[ET]] : !cir.eh_token -> !cir.cleanup_token
 // CHECK:         %[[EH_FLAG:.*]] = cir.load align(1) %[[NRVO]] : !cir.ptr<!cir.bool>, !cir.bool
-// CHECK:         %[[EH_NOT:.*]] = cir.unary(not, %[[EH_FLAG]]) : !cir.bool, !cir.bool
+// CHECK:         %[[EH_NOT:.*]] = cir.not %[[EH_FLAG]] : !cir.bool
 // CHECK:         cir.brcond %[[EH_NOT]] ^[[EH_IF_TRUE:bb[0-9]+]], ^[[EH_MERGE:bb[0-9]+]]
 //
 // EH cleanup if-true: call destructor.
diff --git a/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir b/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
index 7a7b51005c2d3..490c287ab9696 100644
--- a/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
+++ b/clang/test/CIR/Transforms/flatten-cleanup-scope-multi-exit.cir
@@ -501,7 +501,7 @@ cir.func @test_return_computed_value(%arg0: !s32i) -> !s32i {
       %cond = cir.call @shouldReturn() : () -> !cir.bool
       cir.brcond %cond ^bb_return, ^bb_normal
     ^bb_return:
-      %retval = cir.unary(not, %arg0) : !s32i, !s32i
+      %retval = cir.not %arg0 : !s32i
       cir.return %retval : !s32i
     ^bb_normal:
       cir.yield
@@ -533,7 +533,7 @@ cir.func @test_return_computed_value(%arg0: !s32i) -> !s32i {
 // CHECK:         %[[COND:.*]] = cir.call @shouldReturn()
 // CHECK:         cir.brcond %[[COND]] ^[[RET_PATH:bb[0-9]+]], ^[[YIELD_PATH:bb[0-9]+]]
 // CHECK:       ^[[RET_PATH]]:
-// CHECK:         %[[COMPUTED:.*]] = cir.unary(not, %[[ARG0]]) : !s32i, !s32i
+// CHECK:         %[[COMPUTED:.*]] = cir.not %[[ARG0]] : !s32i
 // CHECK:         cir.store %[[COMPUTED]], %[[RET_TMP]]
 // CHECK:         %[[RET_ID:.*]] = cir.const #cir.int<1> : !s32i
 // CHECK:         cir.store %[[RET_ID]], %[[DEST_SLOT]]
diff --git a/clang/test/CIR/Transforms/select.cir b/clang/test/CIR/Transforms/select.cir
index 0ad5c43178831..413b6cc668a7c 100644
--- a/clang/test/CIR/Transforms/select.cir
+++ b/clang/test/CIR/Transforms/select.cir
@@ -54,7 +54,7 @@ module {
   }
 
   //      CHECK: cir.func{{.*}} @simplify_2(%[[ARG0:.+]]: !cir.bool) -> !cir.bool {
-  // CHECK-NEXT:   %[[#A:]] = cir.unary(not, %[[ARG0]]) : !cir.bool, !cir.bool
+  // CHECK-NEXT:   %[[#A:]] = cir.not %[[ARG0]] : !cir.bool
   // CHECK-NEXT:   cir.return %[[#A]] : !cir.bool
   // CHECK-NEXT: }
 }



More information about the llvm-branch-commits mailing list