[libcxx-commits] [clang] [compiler-rt] [flang] [libcxx] [lldb] [llvm] [mlir] [DAGCombiner] Preserve debug location of original load in fold (conv (load x)) (PR #160236)

via libcxx-commits libcxx-commits at lists.llvm.org
Tue Sep 23 04:36:11 PDT 2025


Andrzej Warzyński <andrzej.warzynski at arm.com>
Message-ID:
In-Reply-To: <llvm.org/llvm/llvm-project/pull/160236 at github.com>


https://github.com/jwu10003 updated https://github.com/llvm/llvm-project/pull/160236

>From 8ce1b4a005482a1a395ea9b2f5ebe6d7c067be95 Mon Sep 17 00:00:00 2001
From: "jian.wu" <jian.wu at amd.com>
Date: Tue, 23 Sep 2025 12:07:14 +0800
Subject: [PATCH 01/42] [DAGCombiner] Preserve debug location of original load
 in fold (conv (load x))

This patch fixes a debug-information loss that occurs when a conversion (e.g., a bitcast) of a load
is combined into a new load: `fold (conv (load x)) -> (load (conv*)x)`.

The newly created load node incorrectly used the debug location (`SDLoc`) of the conversion
operation (the `conv` node, `N`) instead of the location of the original load operation (the `load`
node, `LN0`). The conversion's location often points at compiler-internal instructions and provides
little value for source-level debugging. In contrast, the original load's location accurately
reflects the data access in the user's source code.

This change makes the new load inherit the debug location from `LN0` by using `SDLoc(LN0)`, which
improves the debugging experience and fixes a test-case failure observed in the Triton compiler.
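
As a rough illustration, here is a minimal sketch of the fold using the SelectionDAG API
(simplified and with illustrative names, not the verbatim `DAGCombiner::visitBITCAST` code),
with `SDLoc(LN0)` in place of the previous `SDLoc(N)`:

// Sketch of the bitcast-of-load fold; simplified from DAGCombiner.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

static SDValue foldConvOfLoad(SelectionDAG &DAG, SDNode *N) {
  SDValue N0 = N->getOperand(0);                 // the original (load x)
  auto *LN0 = cast<LoadSDNode>(N0.getNode());
  EVT VT = N->getValueType(0);                   // type produced by the conv

  // Was SDLoc(N): the new load carried the conversion's (often synthetic)
  // location. Now SDLoc(LN0): it keeps the original load's source location.
  SDValue NewLoad = DAG.getLoad(VT, SDLoc(LN0), LN0->getChain(),
                                LN0->getBasePtr(), LN0->getMemOperand());

  // Rewire the chain users of the old load to the new load.
  DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
  return NewLoad;
}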
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  2 +-
 llvm/test/CodeGen/AMDGPU/combine-conv-load.ll | 41 +++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/combine-conv-load.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a6ba6e518899f..4cb0a35aa7b25 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16703,7 +16703,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
         }
       }
       SDValue Load =
-          DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
+          DAG.getLoad(VT, SDLoc(LN0), LN0->getChain(), LN0->getBasePtr(),
                       LN0->getMemOperand());
       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
       return Load;
diff --git a/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll b/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll
new file mode 100644
index 0000000000000..900c973b712ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942  < %s | FileCheck %s
+
+; CHECK-LABEL:  test:
+; CHECK:        .loc    1 8 16                          ; test.py:8:16
+; CHECK-NEXT:   s_load_dword
+
+; Function Attrs: alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
+define amdgpu_kernel void @test(ptr addrspace(1) inreg readonly captures(none) %0, ptr addrspace(1) inreg writeonly captures(none) %1, ptr addrspace(1) inreg readnone captures(none) %2, ptr addrspace(1) inreg readnone captures(none) %3) local_unnamed_addr #0 !dbg !4 {
+  %5 = tail call i32 @llvm.amdgcn.workitem.id.x(), !dbg !7
+  %6 = and i32 %5, 255, !dbg !7
+  %7 = icmp eq i32 %6, 0, !dbg !7
+  br i1 %7, label %8, label %10, !dbg !7
+
+8:                                                ; preds = %4
+  %9 = load <1 x float>, ptr addrspace(1) %0, align 4, !dbg !8, !amdgpu.noclobber !6
+  store <1 x float> %9, ptr addrspace(1) %1, align 4, !dbg !7
+  br label %10, !dbg !7
+
+10:                                               ; preds = %8, %4
+  ret void, !dbg !9
+}
+
+; Function Attrs: alwaysinline nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) "amdgpu-agpr-alloc"="0" "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "denormal-fp-math-f32"="ieee" "uniform-work-group-size"="false" }
+attributes #1 = { alwaysinline nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "triton", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly)
+!1 = !DIFile(filename: "test.py", directory: "/path")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = !{i32 1, !"amdhsa_code_object_version", i32 500}
+!4 = distinct !DISubprogram(name: "test", linkageName: "test", scope: !1, file: !1, line: 7, type: !5, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!5 = !DISubroutineType(cc: DW_CC_normal, types: !6)
+!6 = !{}
+!7 = !DILocation(line: 9, column: 20, scope: !4)
+!8 = !DILocation(line: 8, column: 16, scope: !4)
+!9 = !DILocation(line: 9, column: 4, scope: !4)

>From 39959198ad713668629ccc379ef3b3486b1fa421 Mon Sep 17 00:00:00 2001
From: "jian.wu" <jian.wu at amd.com>
Date: Tue, 23 Sep 2025 17:44:25 +0800
Subject: [PATCH 02/42] update test case

---
 llvm/test/CodeGen/AMDGPU/combine-conv-load.ll | 41 -------------------
 .../DebugInfo/AMDGPU/combine-conv-load.ll     | 26 ++++++++++++
 2 files changed, 26 insertions(+), 41 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/combine-conv-load.ll
 create mode 100644 llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll

diff --git a/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll b/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll
deleted file mode 100644
index 900c973b712ae..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/combine-conv-load.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc -mtriple=amdgcn -mcpu=gfx942  < %s | FileCheck %s
-
-; CHECK-LABEL:  test:
-; CHECK:        .loc    1 8 16                          ; test.py:8:16
-; CHECK-NEXT:   s_load_dword
-
-; Function Attrs: alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define amdgpu_kernel void @test(ptr addrspace(1) inreg readonly captures(none) %0, ptr addrspace(1) inreg writeonly captures(none) %1, ptr addrspace(1) inreg readnone captures(none) %2, ptr addrspace(1) inreg readnone captures(none) %3) local_unnamed_addr #0 !dbg !4 {
-  %5 = tail call i32 @llvm.amdgcn.workitem.id.x(), !dbg !7
-  %6 = and i32 %5, 255, !dbg !7
-  %7 = icmp eq i32 %6, 0, !dbg !7
-  br i1 %7, label %8, label %10, !dbg !7
-
-8:                                                ; preds = %4
-  %9 = load <1 x float>, ptr addrspace(1) %0, align 4, !dbg !8, !amdgpu.noclobber !6
-  store <1 x float> %9, ptr addrspace(1) %1, align 4, !dbg !7
-  br label %10, !dbg !7
-
-10:                                               ; preds = %8, %4
-  ret void, !dbg !9
-}
-
-; Function Attrs: alwaysinline nocallback nofree nosync nounwind speculatable willreturn memory(none)
-declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #1
-
-attributes #0 = { alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) "amdgpu-agpr-alloc"="0" "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "denormal-fp-math-f32"="ieee" "uniform-work-group-size"="false" }
-attributes #1 = { alwaysinline nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!2, !3}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "triton", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly)
-!1 = !DIFile(filename: "test.py", directory: "/path")
-!2 = !{i32 2, !"Debug Info Version", i32 3}
-!3 = !{i32 1, !"amdhsa_code_object_version", i32 500}
-!4 = distinct !DISubprogram(name: "test", linkageName: "test", scope: !1, file: !1, line: 7, type: !5, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
-!5 = !DISubroutineType(cc: DW_CC_normal, types: !6)
-!6 = !{}
-!7 = !DILocation(line: 9, column: 20, scope: !4)
-!8 = !DILocation(line: 8, column: 16, scope: !4)
-!9 = !DILocation(line: 9, column: 4, scope: !4)
diff --git a/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll b/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll
new file mode 100644
index 0000000000000..14ce1d9cba098
--- /dev/null
+++ b/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck %s
+
+; CHECK-LABEL:  test:
+; CHECK:        .loc    1 8 16 prologue_end             ; test.py:8:16
+; CHECK-NEXT:   s_load_dword
+
+define void @test(ptr addrspace(1) inreg readonly captures(none) %0, ptr addrspace(1) inreg writeonly captures(none) %1) local_unnamed_addr !dbg !4 {
+  %3 = load <1 x float>, ptr addrspace(1) %0, align 4, !dbg !8, !amdgpu.noclobber !6
+  store <1 x float> %3, ptr addrspace(1) %1, align 4, !dbg !7
+
+  ret void, !dbg !9
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "triton", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly)
+!1 = !DIFile(filename: "test.py", directory: "/path")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = !{i32 1, !"amdhsa_code_object_version", i32 500}
+!4 = distinct !DISubprogram(name: "test", linkageName: "test", scope: !1, file: !1, line: 7, type: !5, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!5 = !DISubroutineType(cc: DW_CC_normal, types: !6)
+!6 = !{}
+!7 = !DILocation(line: 9, column: 20, scope: !4)
+!8 = !DILocation(line: 8, column: 16, scope: !4)
+!9 = !DILocation(line: 9, column: 4, scope: !4)

>From c255ce47fe1b4050a3c900fddfb789b0e3b6496d Mon Sep 17 00:00:00 2001
From: "jian.wu" <jian.wu at amd.com>
Date: Tue, 23 Sep 2025 19:14:30 +0800
Subject: [PATCH 03/42] Simplify the test case

---
 llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll b/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll
index 14ce1d9cba098..0bb3d383248fb 100644
--- a/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll
+++ b/llvm/test/DebugInfo/AMDGPU/combine-conv-load.ll
@@ -4,10 +4,9 @@
 ; CHECK:        .loc    1 8 16 prologue_end             ; test.py:8:16
 ; CHECK-NEXT:   s_load_dword
 
-define void @test(ptr addrspace(1) inreg readonly captures(none) %0, ptr addrspace(1) inreg writeonly captures(none) %1) local_unnamed_addr !dbg !4 {
-  %3 = load <1 x float>, ptr addrspace(1) %0, align 4, !dbg !8, !amdgpu.noclobber !6
-  store <1 x float> %3, ptr addrspace(1) %1, align 4, !dbg !7
-
+define void @test(ptr addrspace(1) inreg readonly captures(none) %arg0, ptr addrspace(1) inreg writeonly captures(none) %arg1) local_unnamed_addr !dbg !4 {
+  %ld = load <1 x float>, ptr addrspace(1) %arg0, align 4, !dbg !8, !amdgpu.noclobber !6
+  store <1 x float> %ld, ptr addrspace(1) %arg1, align 4, !dbg !7
   ret void, !dbg !9
 }
 

>From f9f4cdd1c16c6deab5bc95d2fdb612a65090471d Mon Sep 17 00:00:00 2001
From: Helena Kotas <hekotas at microsoft.com>
Date: Mon, 22 Sep 2025 21:37:56 -0700
Subject: [PATCH 04/42] [DirectX] NonUniformResourceIndex lowering (#159608)

Introduces the `llvm.{dx|spv}.resource.nonuniformindex` intrinsic, which is used when a resource index is not guaranteed to be uniform across threads (the HLSL function NonUniformResourceIndex).

The DXIL lowering layer looks for calls to this intrinsic in the resource index calculation, makes sure they are reflected in the NonUniform flag on the DXIL create-handle ops (`dx.op.createHandle` and `dx.op.createHandleFromBinding`), and then removes the calls from the module.

Closes #155701
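
For illustration, a simplified sketch of the index-chain walk the lowering performs (it mirrors
`hasNonUniformIndex` from the diff below, with a visited set added to guard against cycles; the
helper name is illustrative, not the verbatim implementation):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsDirectX.h"
using namespace llvm;

// Walk the index computation backwards; report true if any value in the
// chain is produced by llvm.dx.resource.nonuniformindex.
static bool indexMayBeNonUniform(Value *IndexOp) {
  if (isa<Constant>(IndexOp))
    return false;                        // constant index: trivially uniform
  SmallVector<Value *> WorkList{IndexOp};
  SmallPtrSet<Value *, 8> Visited;
  while (!WorkList.empty()) {
    Value *V = WorkList.pop_back_val();
    if (!Visited.insert(V).second)
      continue;
    if (auto *CI = dyn_cast<CallInst>(V))
      if (CI->getIntrinsicID() == Intrinsic::dx_resource_nonuniformindex)
        return true;                     // marker intrinsic found in the chain
    if (auto *U = dyn_cast<User>(V))
      for (Value *Op : U->operands())
        if (!isa<Constant>(Op))
          WorkList.push_back(Op);        // keep walking non-constant operands
  }
  // A round-trip through memory (store/load) breaks the chain, so the flag
  // is not picked up in that case, as the tests below demonstrate.
  return false;
}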
---
 llvm/include/llvm/IR/IntrinsicsDirectX.td     |  4 +
 llvm/include/llvm/IR/IntrinsicsSPIRV.td       |  3 +
 llvm/lib/Target/DirectX/DXILOpLowering.cpp    | 68 +++++++++++++---
 .../test/CodeGen/DirectX/CreateHandle-NURI.ll | 70 +++++++++++++++++
 .../DirectX/CreateHandleFromBinding-NURI.ll   | 77 +++++++++++++++++++
 5 files changed, 212 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/DirectX/CreateHandle-NURI.ll
 create mode 100644 llvm/test/CodeGen/DirectX/CreateHandleFromBinding-NURI.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index e60e07801455f..d27d42841e012 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -39,6 +39,10 @@ def int_dx_resource_handlefromimplicitbinding
 def int_dx_resource_getpointer
     : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [llvm_any_ty, llvm_i32_ty],
                             [IntrNoMem]>;
+
+def int_dx_resource_nonuniformindex
+    : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
 def int_dx_resource_load_typedbuffer
     : DefaultAttrsIntrinsic<[llvm_any_ty, llvm_i1_ty],
                             [llvm_any_ty, llvm_i32_ty], [IntrReadMem]>;
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
index 0b0c2b137e55b..b89fa87bf77b9 100644
--- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td
+++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td
@@ -161,6 +161,9 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]
       : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [llvm_any_ty, llvm_i32_ty],
                               [IntrNoMem]>;
 
+def int_spv_resource_nonuniformindex
+      : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
   // Read a value from the image buffer. It does not translate directly to a
   // single OpImageRead because the result type is not necessarily a 4 element
   // vector.
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index 577b4624458b9..610d8b63bba27 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -16,6 +16,7 @@
 #include "llvm/Analysis/DXILMetadataAnalysis.h"
 #include "llvm/Analysis/DXILResource.h"
 #include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Constant.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instruction.h"
@@ -24,6 +25,7 @@
 #include "llvm/IR/IntrinsicsDirectX.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/IR/Use.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -42,6 +44,7 @@ class OpLowerer {
   DXILResourceTypeMap &DRTM;
   const ModuleMetadataInfo &MMDI;
   SmallVector<CallInst *> CleanupCasts;
+  Function *CleanupNURI = nullptr;
 
 public:
   OpLowerer(Module &M, DXILResourceMap &DRM, DXILResourceTypeMap &DRTM,
@@ -195,6 +198,21 @@ class OpLowerer {
     CleanupCasts.clear();
   }
 
+  void cleanupNonUniformResourceIndexCalls() {
+    // Replace all NonUniformResourceIndex calls with their argument.
+    if (!CleanupNURI)
+      return;
+    for (User *U : make_early_inc_range(CleanupNURI->users())) {
+      CallInst *CI = dyn_cast<CallInst>(U);
+      if (!CI)
+        continue;
+      CI->replaceAllUsesWith(CI->getArgOperand(0));
+      CI->eraseFromParent();
+    }
+    CleanupNURI->eraseFromParent();
+    CleanupNURI = nullptr;
+  }
+
   // Remove the resource global associated with the handleFromBinding call
   // instruction and their uses as they aren't needed anymore.
   // TODO: We should verify that all the globals get removed.
@@ -229,6 +247,31 @@ class OpLowerer {
       NameGlobal->removeFromParent();
   }
 
+  bool hasNonUniformIndex(Value *IndexOp) {
+    if (isa<llvm::Constant>(IndexOp))
+      return false;
+
+    SmallVector<Value *> WorkList;
+    WorkList.push_back(IndexOp);
+
+    while (!WorkList.empty()) {
+      Value *V = WorkList.pop_back_val();
+      if (auto *CI = dyn_cast<CallInst>(V)) {
+        if (CI->getCalledFunction()->getIntrinsicID() ==
+            Intrinsic::dx_resource_nonuniformindex)
+          return true;
+      }
+      if (auto *U = llvm::dyn_cast<llvm::User>(V)) {
+        for (llvm::Value *Op : U->operands()) {
+          if (isa<llvm::Constant>(Op))
+            continue;
+          WorkList.push_back(Op);
+        }
+      }
+    }
+    return false;
+  }
+
   [[nodiscard]] bool lowerToCreateHandle(Function &F) {
     IRBuilder<> &IRB = OpBuilder.getIRB();
     Type *Int8Ty = IRB.getInt8Ty();
@@ -250,13 +293,12 @@ class OpLowerer {
         IndexOp = IRB.CreateAdd(IndexOp,
                                 ConstantInt::get(Int32Ty, Binding.LowerBound));
 
-      // FIXME: The last argument is a NonUniform flag which needs to be set
-      // based on resource analysis.
-      // https://github.com/llvm/llvm-project/issues/155701
+      bool HasNonUniformIndex =
+          (Binding.Size == 1) ? false : hasNonUniformIndex(IndexOp);
       std::array<Value *, 4> Args{
           ConstantInt::get(Int8Ty, llvm::to_underlying(RC)),
           ConstantInt::get(Int32Ty, Binding.RecordID), IndexOp,
-          ConstantInt::get(Int1Ty, false)};
+          ConstantInt::get(Int1Ty, HasNonUniformIndex)};
       Expected<CallInst *> OpCall =
           OpBuilder.tryCreateOp(OpCode::CreateHandle, Args, CI->getName());
       if (Error E = OpCall.takeError())
@@ -300,11 +342,10 @@ class OpLowerer {
                                 : Binding.LowerBound + Binding.Size - 1;
       Constant *ResBind = OpBuilder.getResBind(Binding.LowerBound, UpperBound,
                                                Binding.Space, RC);
-      // FIXME: The last argument is a NonUniform flag which needs to be set
-      // based on resource analysis.
-      // https://github.com/llvm/llvm-project/issues/155701
-      Constant *NonUniform = ConstantInt::get(Int1Ty, false);
-      std::array<Value *, 3> BindArgs{ResBind, IndexOp, NonUniform};
+      bool NonUniformIndex =
+          (Binding.Size == 1) ? false : hasNonUniformIndex(IndexOp);
+      Constant *NonUniformOp = ConstantInt::get(Int1Ty, NonUniformIndex);
+      std::array<Value *, 3> BindArgs{ResBind, IndexOp, NonUniformOp};
       Expected<CallInst *> OpBind = OpBuilder.tryCreateOp(
           OpCode::CreateHandleFromBinding, BindArgs, CI->getName());
       if (Error E = OpBind.takeError())
@@ -868,6 +909,11 @@ class OpLowerer {
       case Intrinsic::dx_resource_getpointer:
         HasErrors |= lowerGetPointer(F);
         break;
+      case Intrinsic::dx_resource_nonuniformindex:
+        assert(!CleanupNURI &&
+               "overloaded llvm.dx.resource.nonuniformindex intrinsics?");
+        CleanupNURI = &F;
+        break;
       case Intrinsic::dx_resource_load_typedbuffer:
         HasErrors |= lowerTypedBufferLoad(F, /*HasCheckBit=*/true);
         break;
@@ -908,8 +954,10 @@ class OpLowerer {
       }
       Updated = true;
     }
-    if (Updated && !HasErrors)
+    if (Updated && !HasErrors) {
       cleanupHandleCasts();
+      cleanupNonUniformResourceIndexCalls();
+    }
 
     return Updated;
   }
diff --git a/llvm/test/CodeGen/DirectX/CreateHandle-NURI.ll b/llvm/test/CodeGen/DirectX/CreateHandle-NURI.ll
new file mode 100644
index 0000000000000..cfa6c983df3f4
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/CreateHandle-NURI.ll
@@ -0,0 +1,70 @@
+; RUN: opt -S -passes=dxil-op-lower %s | FileCheck %s
+
+target triple = "dxil-pc-shadermodel6.0-compute"
+
+ at A.str = internal unnamed_addr constant [2 x i8] c"A\00", align 1
+ at B.str = internal unnamed_addr constant [2 x i8] c"A\00", align 1
+
+declare i32 @some_val();
+
+define void @test_buffers_with_nuri() {
+
+  %val = call i32 @some_val()
+  %foo = alloca i32, align 4
+
+  ; RWBuffer<float> A[10];
+  ;
+  ; A[NonUniformResourceIndex(val)];
+
+  %nuri1 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  %res1 = call target("dx.TypedBuffer", float, 1, 0, 0)
+            @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %nuri1, ptr @A.str)
+  ; CHECK: call %dx.types.Handle @dx.op.createHandle(i32 57, i8 1, i32 0, i32 %val, i1 true) #[[ATTR:.*]]
+  ; CHECK-NOT: @llvm.dx.cast.handle
+  ; CHECK-NOT: @llvm.dx.resource.nonuniformindex
+
+  ; A[NonUniformResourceIndex(val + 1) % 10];
+  %add1 = add i32 %val, 1
+  %nuri2 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %add1)
+  %rem1 = urem i32 %nuri2, 10
+  %res2 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %rem1, ptr @A.str)
+  ; CHECK: call %dx.types.Handle @dx.op.createHandle(i32 57, i8 1, i32 0, i32 %rem1, i1 true) #[[ATTR]]
+
+  ; A[10 + 3 * NonUniformResourceIndex(GI)];
+  %mul1 = mul i32 %nuri1, 3
+  %add2 = add i32 %mul1, 10
+  %res3 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %add2, ptr @A.str)
+  ; CHECK: call %dx.types.Handle @dx.op.createHandle(i32 57, i8 1, i32 0, i32 %add2, i1 true) #[[ATTR]]
+
+  ; NonUniformResourceIndex value going through store & load - the flag is not going to get picked up:
+  %a = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  store i32 %a, ptr %foo
+  %b = load i32, ptr %foo
+  %res4 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %b, ptr @A.str)
+  ; CHECK: call %dx.types.Handle @dx.op.createHandle(i32 57, i8 1, i32 0, i32 %b, i1 false) #[[ATTR]]
+
+  ; NonUniformResourceIndex index value on a single resource (not an array) - the flag is not going to get picked up:
+  ;
+  ; RWBuffer<float> B : register(u20);
+  ; B[NonUniformResourceIndex(val)];
+  %nuri3 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  %res5 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 20, i32 0, i32 1, i32 %nuri1, ptr @B.str)
+  ; CHECK: call %dx.types.Handle @dx.op.createHandle(i32 57, i8 1, i32 1, i32 %val, i1 false) #[[ATTR]]
+
+  ; NonUniformResourceIndex on unrelated value - the call is removed:
+  ; foo = NonUniformResourceIndex(val);
+  %nuri4 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  store i32 %nuri4, ptr %foo
+  ; CHECK: store i32 %val, ptr %foo
+  ; CHECK-NOT: @llvm.dx.resource.nonuniformindex
+
+  ret void
+}
+
+; CHECK: attributes #[[ATTR]] = {{{.*}} memory(read) {{.*}}}
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
diff --git a/llvm/test/CodeGen/DirectX/CreateHandleFromBinding-NURI.ll b/llvm/test/CodeGen/DirectX/CreateHandleFromBinding-NURI.ll
new file mode 100644
index 0000000000000..80bf5a6a67c91
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/CreateHandleFromBinding-NURI.ll
@@ -0,0 +1,77 @@
+; RUN: opt -S -passes=dxil-op-lower %s | FileCheck %s
+
+target triple = "dxil-pc-shadermodel6.6-compute"
+
+ at A.str = internal unnamed_addr constant [2 x i8] c"A\00", align 1
+ at B.str = internal unnamed_addr constant [2 x i8] c"A\00", align 1
+
+declare i32 @some_val();
+
+define void @test_buffers_with_nuri() {
+
+  %val = call i32 @some_val()
+  %foo = alloca i32, align 4
+
+  ; RWBuffer<float> A[10];
+  ;
+  ; A[NonUniformResourceIndex(val)];
+
+  %nuri1 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  %res1 = call target("dx.TypedBuffer", float, 1, 0, 0) 
+            @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %nuri1, ptr @A.str)
+  ; CHECK: %[[RES1:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 9, i32 0, i8 1 }, i32 %val, i1 true) #[[ATTR:.*]]
+  ; CHECK: call  %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[RES1]], %dx.types.ResourceProperties { i32 4106, i32 265 }) #[[ATTR]]
+  ; CHECK-NOT: @llvm.dx.cast.handle
+  ; CHECK-NOT: @llvm.dx.resource.nonuniformindex
+
+  ; A[NonUniformResourceIndex(val + 1) % 10];
+  %add1 = add i32 %val, 1
+  %nuri2 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %add1)
+  %rem1 = urem i32 %nuri2, 10
+  %res2 = call target("dx.TypedBuffer", float, 1, 0, 0) 
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %rem1, ptr @A.str)
+  ; CHECK: %[[RES2:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 9, i32 0, i8 1 }, i32 %rem1, i1 true) #[[ATTR]]
+  ; CHECK: call %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[RES2]], %dx.types.ResourceProperties { i32 4106, i32 265 }) #[[ATTR]]
+
+  ; A[10 + 3 * NonUniformResourceIndex(GI)];
+  %mul1 = mul i32 %nuri1, 3
+  %add2 = add i32 %mul1, 10
+  %res3 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %add2, ptr @A.str)
+  ; CHECK: %[[RES3:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 9, i32 0, i8 1 }, i32 %add2, i1 true) #[[ATTR]]
+  ; CHECK: %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[RES3]], %dx.types.ResourceProperties { i32 4106, i32 265 }) #[[ATTR]]
+  ret void
+
+  ; NonUniformResourceIndex value going through store & load: the flag is not going to get picked up
+  %a = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  store i32 %a, ptr %foo
+  %b = load i32, ptr %foo
+  %res4 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 10, i32 %b, ptr @A.str)
+  ; CHECK: %[[RES4:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 9, i32 0, i8 1 }, i32 %b, i1 false) #[[ATTR]]
+  ; CHECK: %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[RES4]], %dx.types.ResourceProperties { i32 4106, i32 265 }) #[[ATTR]]
+
+  ; NonUniformResourceIndex index value on a single resource (not an array): the flag is not going to get picked up
+  ; RWBuffer<float> B : register(u20);
+  ;
+  ; B[NonUniformResourceIndex(val)];
+
+  %nuri3 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  %res5 = call target("dx.TypedBuffer", float, 1, 0, 0)
+           @llvm.dx.resource.handlefrombinding(i32 20, i32 0, i32 1, i32 %nuri1, ptr @B.str)
+  ; CHECK: %[[RES4:.*]] = call %dx.types.Handle @dx.op.createHandleFromBinding(i32 217, %dx.types.ResBind { i32 0, i32 0, i32 20, i8 1 }, i32 %val, i1 false) #[[ATTR]]
+  ; CHECK: %dx.types.Handle @dx.op.annotateHandle(i32 216, %dx.types.Handle %[[RES4]], %dx.types.ResourceProperties { i32 4106, i32 265 }) #[[ATTR]]
+
+  ; NonUniformResourceIndex on unrelated value - the call is removed:
+  ; foo = NonUniformResourceIndex(val);
+  %nuri4 = tail call noundef i32 @llvm.dx.resource.nonuniformindex(i32 %val)
+  store i32 %nuri4, ptr %foo
+  ; CHECK: store i32 %val, ptr %foo
+  ; CHECK-NOT: @llvm.dx.resource.nonuniformindex
+
+  ret void
+}
+
+; CHECK: attributes #[[ATTR]] = {{{.*}} memory(none) {{.*}}}
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }

>From 628abab699a1097ecf057debe3b9ad2e27597c31 Mon Sep 17 00:00:00 2001
From: Kareem Ergawy <kareem.ergawy at amd.com>
Date: Tue, 23 Sep 2025 07:27:21 +0200
Subject: [PATCH 05/42] [flang][OpenMP] `do concurrent`: support `local` on
 device (#157638)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Extends support for mapping `do concurrent` to the device by adding
support for `local` specifiers. The changes in this PR map the local
variable to the `omp.target` op and use the mapped value as the
`private` clause operand on the nested `omp.parallel` op (see the
sketch after the PR list below).

- https://github.com/llvm/llvm-project/pull/155754
- https://github.com/llvm/llvm-project/pull/155987
- https://github.com/llvm/llvm-project/pull/155992
- https://github.com/llvm/llvm-project/pull/155993
- https://github.com/llvm/llvm-project/pull/157638 ◀️
- https://github.com/llvm/llvm-project/pull/156610
- https://github.com/llvm/llvm-project/pull/156837
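
A rough sketch of the key change (an illustrative helper, not the verbatim `genPrivatizers` code):
when targeting the device, the `private` clause operand must be the value the `omp.target` mapping
produced for the local variable, not the original host value.

#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/IRMapping.h"

// Illustrative helper: pick the `private` clause operand for a localizer.
static void addPrivateOperand(mlir::omp::PrivateClauseOps &clauseOps,
                              mlir::IRMapping &mapper, mlir::Value localVar,
                              mlir::omp::PrivateClauseOp privatizer,
                              bool mapToDevice) {
  // Host: the original SSA value is still visible to the nested omp.parallel.
  // Device: use the value created when localVar was mapped onto the enclosing
  // omp.target region.
  clauseOps.privateVars.push_back(mapToDevice ? mapper.lookup(localVar)
                                              : localVar);
  clauseOps.privateSyms.push_back(mlir::SymbolRefAttr::get(privatizer));
}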
---
 .../include/flang/Optimizer/Dialect/FIROps.td |  12 ++
 .../OpenMP/DoConcurrentConversion.cpp         | 200 +++++++++++-------
 .../Transforms/DoConcurrent/local_device.mlir |  49 +++++
 3 files changed, 183 insertions(+), 78 deletions(-)
 create mode 100644 flang/test/Transforms/DoConcurrent/local_device.mlir

diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index bc971e8fd6600..fc6eedc6ed4c6 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -3894,6 +3894,18 @@ def fir_DoConcurrentLoopOp : fir_Op<"do_concurrent.loop",
       return getReduceVars().size();
     }
 
+    unsigned getInductionVarsStart() {
+      return 0;
+    }
+
+    unsigned getLocalOperandsStart() {
+      return getNumInductionVars();
+    }
+
+    unsigned getReduceOperandsStart() {
+      return getLocalOperandsStart() + getNumLocalOperands();
+    }
+
     mlir::Block::BlockArgListType getInductionVars() {
       return getBody()->getArguments().slice(0, getNumInductionVars());
     }
diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
index 6c71924000842..fb99623128621 100644
--- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
+++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
@@ -138,6 +138,9 @@ void collectLoopLiveIns(fir::DoConcurrentLoopOp loop,
 
         liveIns.push_back(operand->get());
       });
+
+  for (mlir::Value local : loop.getLocalVars())
+    liveIns.push_back(local);
 }
 
 /// Collects values that are local to a loop: "loop-local values". A loop-local
@@ -298,8 +301,7 @@ class DoConcurrentConversion
               .getIsTargetDevice();
 
       mlir::omp::TargetOperands targetClauseOps;
-      genLoopNestClauseOps(doLoop.getLoc(), rewriter, loop, mapper,
-                           loopNestClauseOps,
+      genLoopNestClauseOps(doLoop.getLoc(), rewriter, loop, loopNestClauseOps,
                            isTargetDevice ? nullptr : &targetClauseOps);
 
       LiveInShapeInfoMap liveInShapeInfoMap;
@@ -321,14 +323,13 @@ class DoConcurrentConversion
     }
 
     mlir::omp::ParallelOp parallelOp =
-        genParallelOp(doLoop.getLoc(), rewriter, ivInfos, mapper);
+        genParallelOp(rewriter, loop, ivInfos, mapper);
 
     // Only set as composite when part of `distribute parallel do`.
     parallelOp.setComposite(mapToDevice);
 
     if (!mapToDevice)
-      genLoopNestClauseOps(doLoop.getLoc(), rewriter, loop, mapper,
-                           loopNestClauseOps);
+      genLoopNestClauseOps(doLoop.getLoc(), rewriter, loop, loopNestClauseOps);
 
     for (mlir::Value local : locals)
       looputils::localizeLoopLocalValue(local, parallelOp.getRegion(),
@@ -337,10 +338,38 @@ class DoConcurrentConversion
     if (mapToDevice)
       genDistributeOp(doLoop.getLoc(), rewriter).setComposite(/*val=*/true);
 
-    mlir::omp::LoopNestOp ompLoopNest =
+    auto [loopNestOp, wsLoopOp] =
         genWsLoopOp(rewriter, loop, mapper, loopNestClauseOps,
                     /*isComposite=*/mapToDevice);
 
+    // `local` region arguments are transferred/cloned from the `do concurrent`
+    // loop to the loopnest op when the region is cloned above. Instead, these
+    // region arguments should be on the workshare loop's region.
+    if (mapToDevice) {
+      for (auto [parallelArg, loopNestArg] : llvm::zip_equal(
+               parallelOp.getRegion().getArguments(),
+               loopNestOp.getRegion().getArguments().slice(
+                   loop.getLocalOperandsStart(), loop.getNumLocalOperands())))
+        rewriter.replaceAllUsesWith(loopNestArg, parallelArg);
+
+      for (auto [wsloopArg, loopNestArg] : llvm::zip_equal(
+               wsLoopOp.getRegion().getArguments(),
+               loopNestOp.getRegion().getArguments().slice(
+                   loop.getReduceOperandsStart(), loop.getNumReduceOperands())))
+        rewriter.replaceAllUsesWith(loopNestArg, wsloopArg);
+    } else {
+      for (auto [wsloopArg, loopNestArg] :
+           llvm::zip_equal(wsLoopOp.getRegion().getArguments(),
+                           loopNestOp.getRegion().getArguments().drop_front(
+                               loopNestClauseOps.loopLowerBounds.size())))
+        rewriter.replaceAllUsesWith(loopNestArg, wsloopArg);
+    }
+
+    for (unsigned i = 0;
+         i < loop.getLocalVars().size() + loop.getReduceVars().size(); ++i)
+      loopNestOp.getRegion().eraseArgument(
+          loopNestClauseOps.loopLowerBounds.size());
+
     rewriter.setInsertionPoint(doLoop);
     fir::FirOpBuilder builder(
         rewriter,
@@ -361,7 +390,7 @@ class DoConcurrentConversion
     // Mark `unordered` loops that are not perfectly nested to be skipped from
     // the legality check of the `ConversionTarget` since we are not interested
     // in mapping them to OpenMP.
-    ompLoopNest->walk([&](fir::DoConcurrentOp doLoop) {
+    loopNestOp->walk([&](fir::DoConcurrentOp doLoop) {
       concurrentLoopsToSkip.insert(doLoop);
     });
 
@@ -372,11 +401,21 @@ class DoConcurrentConversion
 
 private:
   mlir::omp::ParallelOp
-  genParallelOp(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+  genParallelOp(mlir::ConversionPatternRewriter &rewriter,
+                fir::DoConcurrentLoopOp loop,
                 looputils::InductionVariableInfos &ivInfos,
                 mlir::IRMapping &mapper) const {
-    auto parallelOp = mlir::omp::ParallelOp::create(rewriter, loc);
-    rewriter.createBlock(&parallelOp.getRegion());
+    mlir::omp::ParallelOperands parallelOps;
+
+    if (mapToDevice)
+      genPrivatizers(rewriter, mapper, loop, parallelOps);
+
+    mlir::Location loc = loop.getLoc();
+    auto parallelOp = mlir::omp::ParallelOp::create(rewriter, loc, parallelOps);
+    Fortran::common::openmp::EntryBlockArgs parallelArgs;
+    parallelArgs.priv.vars = parallelOps.privateVars;
+    Fortran::common::openmp::genEntryBlock(rewriter, parallelArgs,
+                                           parallelOp.getRegion());
     rewriter.setInsertionPoint(mlir::omp::TerminatorOp::create(rewriter, loc));
 
     genLoopNestIndVarAllocs(rewriter, ivInfos, mapper);
@@ -413,7 +452,7 @@ class DoConcurrentConversion
 
   void genLoopNestClauseOps(
       mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
-      fir::DoConcurrentLoopOp loop, mlir::IRMapping &mapper,
+      fir::DoConcurrentLoopOp loop,
       mlir::omp::LoopNestOperands &loopNestClauseOps,
       mlir::omp::TargetOperands *targetClauseOps = nullptr) const {
     assert(loopNestClauseOps.loopLowerBounds.empty() &&
@@ -444,59 +483,14 @@ class DoConcurrentConversion
     loopNestClauseOps.loopInclusive = rewriter.getUnitAttr();
   }
 
-  mlir::omp::LoopNestOp
+  std::pair<mlir::omp::LoopNestOp, mlir::omp::WsloopOp>
   genWsLoopOp(mlir::ConversionPatternRewriter &rewriter,
               fir::DoConcurrentLoopOp loop, mlir::IRMapping &mapper,
               const mlir::omp::LoopNestOperands &clauseOps,
               bool isComposite) const {
     mlir::omp::WsloopOperands wsloopClauseOps;
-
-    auto cloneFIRRegionToOMP = [&rewriter](mlir::Region &firRegion,
-                                           mlir::Region &ompRegion) {
-      if (!firRegion.empty()) {
-        rewriter.cloneRegionBefore(firRegion, ompRegion, ompRegion.begin());
-        auto firYield =
-            mlir::cast<fir::YieldOp>(ompRegion.back().getTerminator());
-        rewriter.setInsertionPoint(firYield);
-        mlir::omp::YieldOp::create(rewriter, firYield.getLoc(),
-                                   firYield.getOperands());
-        rewriter.eraseOp(firYield);
-      }
-    };
-
-    // For `local` (and `local_init`) opernads, emit corresponding `private`
-    // clauses and attach these clauses to the workshare loop.
-    if (!loop.getLocalVars().empty())
-      for (auto [op, sym, arg] : llvm::zip_equal(
-               loop.getLocalVars(),
-               loop.getLocalSymsAttr().getAsRange<mlir::SymbolRefAttr>(),
-               loop.getRegionLocalArgs())) {
-        auto localizer = moduleSymbolTable.lookup<fir::LocalitySpecifierOp>(
-            sym.getLeafReference());
-        if (localizer.getLocalitySpecifierType() ==
-            fir::LocalitySpecifierType::LocalInit)
-          TODO(localizer.getLoc(),
-               "local_init conversion is not supported yet");
-
-        mlir::OpBuilder::InsertionGuard guard(rewriter);
-        rewriter.setInsertionPointAfter(localizer);
-
-        auto privatizer = mlir::omp::PrivateClauseOp::create(
-            rewriter, localizer.getLoc(), sym.getLeafReference().str() + ".omp",
-            localizer.getTypeAttr().getValue(),
-            mlir::omp::DataSharingClauseType::Private);
-
-        cloneFIRRegionToOMP(localizer.getInitRegion(),
-                            privatizer.getInitRegion());
-        cloneFIRRegionToOMP(localizer.getDeallocRegion(),
-                            privatizer.getDeallocRegion());
-
-        moduleSymbolTable.insert(privatizer);
-
-        wsloopClauseOps.privateVars.push_back(op);
-        wsloopClauseOps.privateSyms.push_back(
-            mlir::SymbolRefAttr::get(privatizer));
-      }
+    if (!mapToDevice)
+      genPrivatizers(rewriter, mapper, loop, wsloopClauseOps);
 
     if (!loop.getReduceVars().empty()) {
       for (auto [op, byRef, sym, arg] : llvm::zip_equal(
@@ -519,15 +513,15 @@ class DoConcurrentConversion
               rewriter, firReducer.getLoc(), ompReducerName,
               firReducer.getTypeAttr().getValue());
 
-          cloneFIRRegionToOMP(firReducer.getAllocRegion(),
+          cloneFIRRegionToOMP(rewriter, firReducer.getAllocRegion(),
                               ompReducer.getAllocRegion());
-          cloneFIRRegionToOMP(firReducer.getInitializerRegion(),
+          cloneFIRRegionToOMP(rewriter, firReducer.getInitializerRegion(),
                               ompReducer.getInitializerRegion());
-          cloneFIRRegionToOMP(firReducer.getReductionRegion(),
+          cloneFIRRegionToOMP(rewriter, firReducer.getReductionRegion(),
                               ompReducer.getReductionRegion());
-          cloneFIRRegionToOMP(firReducer.getAtomicReductionRegion(),
+          cloneFIRRegionToOMP(rewriter, firReducer.getAtomicReductionRegion(),
                               ompReducer.getAtomicReductionRegion());
-          cloneFIRRegionToOMP(firReducer.getCleanupRegion(),
+          cloneFIRRegionToOMP(rewriter, firReducer.getCleanupRegion(),
                               ompReducer.getCleanupRegion());
           moduleSymbolTable.insert(ompReducer);
         }
@@ -559,21 +553,10 @@ class DoConcurrentConversion
 
     rewriter.setInsertionPointToEnd(&loopNestOp.getRegion().back());
     mlir::omp::YieldOp::create(rewriter, loop->getLoc());
+    loop->getParentOfType<mlir::ModuleOp>().print(
+        llvm::errs(), mlir::OpPrintingFlags().assumeVerified());
 
-    // `local` region arguments are transferred/cloned from the `do concurrent`
-    // loop to the loopnest op when the region is cloned above. Instead, these
-    // region arguments should be on the workshare loop's region.
-    for (auto [wsloopArg, loopNestArg] :
-         llvm::zip_equal(wsloopOp.getRegion().getArguments(),
-                         loopNestOp.getRegion().getArguments().drop_front(
-                             clauseOps.loopLowerBounds.size())))
-      rewriter.replaceAllUsesWith(loopNestArg, wsloopArg);
-
-    for (unsigned i = 0;
-         i < loop.getLocalVars().size() + loop.getReduceVars().size(); ++i)
-      loopNestOp.getRegion().eraseArgument(clauseOps.loopLowerBounds.size());
-
-    return loopNestOp;
+    return {loopNestOp, wsloopOp};
   }
 
   void genBoundsOps(fir::FirOpBuilder &builder, mlir::Value liveIn,
@@ -817,6 +800,67 @@ class DoConcurrentConversion
     return distOp;
   }
 
+  void cloneFIRRegionToOMP(mlir::ConversionPatternRewriter &rewriter,
+                           mlir::Region &firRegion,
+                           mlir::Region &ompRegion) const {
+    if (!firRegion.empty()) {
+      rewriter.cloneRegionBefore(firRegion, ompRegion, ompRegion.begin());
+      auto firYield =
+          mlir::cast<fir::YieldOp>(ompRegion.back().getTerminator());
+      rewriter.setInsertionPoint(firYield);
+      mlir::omp::YieldOp::create(rewriter, firYield.getLoc(),
+                                 firYield.getOperands());
+      rewriter.eraseOp(firYield);
+    }
+  }
+
+  /// Generate bodies of OpenMP privatizers by cloning the bodies of FIR
+  /// privatizers.
+  ///
+  /// \param [in] rewriter - used to drive IR generation for privatizers.
+  /// \param [in] mapper - value mapping from FIR to OpenMP constructs.
+  /// \param [in] loop - FIR loop to convert its localizers.
+  ///
+  /// \param [out] privateClauseOps - OpenMP privatizers to gen their bodies.
+  void genPrivatizers(mlir::ConversionPatternRewriter &rewriter,
+                      mlir::IRMapping &mapper, fir::DoConcurrentLoopOp loop,
+                      mlir::omp::PrivateClauseOps &privateClauseOps) const {
+    // For `local` (and `local_init`) operands, emit corresponding `private`
+    // clauses and attach these clauses to the workshare loop.
+    if (!loop.getLocalVars().empty())
+      for (auto [var, sym, arg] : llvm::zip_equal(
+               loop.getLocalVars(),
+               loop.getLocalSymsAttr().getAsRange<mlir::SymbolRefAttr>(),
+               loop.getRegionLocalArgs())) {
+        auto localizer = moduleSymbolTable.lookup<fir::LocalitySpecifierOp>(
+            sym.getLeafReference());
+        if (localizer.getLocalitySpecifierType() ==
+            fir::LocalitySpecifierType::LocalInit)
+          TODO(localizer.getLoc(),
+               "local_init conversion is not supported yet");
+
+        mlir::OpBuilder::InsertionGuard guard(rewriter);
+        rewriter.setInsertionPointAfter(localizer);
+
+        auto privatizer = mlir::omp::PrivateClauseOp::create(
+            rewriter, localizer.getLoc(), sym.getLeafReference().str() + ".omp",
+            localizer.getTypeAttr().getValue(),
+            mlir::omp::DataSharingClauseType::Private);
+
+        cloneFIRRegionToOMP(rewriter, localizer.getInitRegion(),
+                            privatizer.getInitRegion());
+        cloneFIRRegionToOMP(rewriter, localizer.getDeallocRegion(),
+                            privatizer.getDeallocRegion());
+
+        moduleSymbolTable.insert(privatizer);
+
+        privateClauseOps.privateVars.push_back(mapToDevice ? mapper.lookup(var)
+                                                           : var);
+        privateClauseOps.privateSyms.push_back(
+            mlir::SymbolRefAttr::get(privatizer));
+      }
+  }
+
   bool mapToDevice;
   llvm::DenseSet<fir::DoConcurrentOp> &concurrentLoopsToSkip;
   mlir::SymbolTable &moduleSymbolTable;
diff --git a/flang/test/Transforms/DoConcurrent/local_device.mlir b/flang/test/Transforms/DoConcurrent/local_device.mlir
new file mode 100644
index 0000000000000..e54bb1aeb414e
--- /dev/null
+++ b/flang/test/Transforms/DoConcurrent/local_device.mlir
@@ -0,0 +1,49 @@
+// RUN: fir-opt --omp-do-concurrent-conversion="map-to=device" %s -o - | FileCheck %s
+
+fir.local {type = local} @_QFfooEmy_local_private_f32 : f32
+
+func.func @_QPfoo() {
+  %0 = fir.dummy_scope : !fir.dscope
+  %3 = fir.alloca f32 {bindc_name = "my_local", uniq_name = "_QFfooEmy_local"}
+  %4:2 = hlfir.declare %3 {uniq_name = "_QFfooEmy_local"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+
+  %c1 = arith.constant 1 : index
+  %c10 = arith.constant 10 : index
+
+  fir.do_concurrent {
+    %7 = fir.alloca i32 {bindc_name = "i"}
+    %8:2 = hlfir.declare %7 {uniq_name = "_QFfooEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+
+    fir.do_concurrent.loop (%arg0) = (%c1) to (%c10) step (%c1) local(@_QFfooEmy_local_private_f32 %4#0 -> %arg1 : !fir.ref<f32>) {
+      %9 = fir.convert %arg0 : (index) -> i32
+      fir.store %9 to %8#0 : !fir.ref<i32>
+      %10:2 = hlfir.declare %arg1 {uniq_name = "_QFfooEmy_local"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+      %cst = arith.constant 4.200000e+01 : f32
+      hlfir.assign %cst to %10#0 : f32, !fir.ref<f32>
+    }
+  }
+  return
+}
+
+// CHECK: omp.private {type = private} @[[OMP_PRIVATIZER:.*.omp]] : f32
+
+// CHECK: %[[LOCAL_DECL:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "{{.*}}my_local"}
+// CHECK: %[[LOCAL_MAP:.*]] = omp.map.info var_ptr(%[[LOCAL_DECL]]#1 : {{.*}})
+
+// CHECK: omp.target host_eval({{.*}}) map_entries({{.*}}, %[[LOCAL_MAP]] -> %[[LOCAL_MAP_ARG:.*]] : {{.*}}) {
+// CHECK:   %[[LOCAL_DEV_DECL:.*]]:2 = hlfir.declare %[[LOCAL_MAP_ARG]] {uniq_name = "_QFfooEmy_local"}
+
+// CHECK:   omp.teams {
+// CHECK:     omp.parallel private(@[[OMP_PRIVATIZER]] %[[LOCAL_DEV_DECL]]#0 -> %[[LOCAL_PRIV_ARG:.*]] : {{.*}}) {
+// CHECK:       omp.distribute {
+// CHECK:         omp.wsloop {
+// CHECK:           omp.loop_nest {{.*}} {
+// CHECK:             %[[LOCAL_LOOP_DECL:.*]]:2 = hlfir.declare %[[LOCAL_PRIV_ARG]] {uniq_name = "_QFfooEmy_local"}
+// CHECK:             hlfir.assign %{{.*}} to %[[LOCAL_LOOP_DECL]]#0
+// CHECK:             omp.yield
+// CHECK:           }
+// CHECK:         }
+// CHECK:       }
+// CHECK:     }
+// CHECK:   }
+// CHECK: }

>From fabec23622acb08756b2a1a31a728cc08dc10648 Mon Sep 17 00:00:00 2001
From: Matheus Izvekov <mizvekov at gmail.com>
Date: Tue, 23 Sep 2025 02:30:09 -0300
Subject: [PATCH 06/42] [clang] NFC: add a few template template parameter test
 cases (#160230)

I also posted these on the core reflector today.
---
 clang/test/SemaTemplate/cwg2398.cpp           | 19 +++++++++++++++
 clang/test/SemaTemplate/temp_arg_template.cpp | 23 +++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/clang/test/SemaTemplate/cwg2398.cpp b/clang/test/SemaTemplate/cwg2398.cpp
index 06333c81a603e..315fa952932c5 100644
--- a/clang/test/SemaTemplate/cwg2398.cpp
+++ b/clang/test/SemaTemplate/cwg2398.cpp
@@ -672,3 +672,22 @@ namespace nttp_partial_order {
     template void f<B>(B<nullptr>);
   } // namespace t6
 } // namespace nttp_partial_order
+
+namespace nttp_inconsistent {
+  namespace t1 {
+    template<class A, A B> struct X {};
+    // expected-error at -1 {{conflicting deduction 'C' against 'int' for parameter}}
+    template<template<class C, int D> class TT> struct Y {};
+    // expected-note at -1 {{previous template template parameter is here}}
+    template struct Y<X>;
+    // expected-note at -1 {{has different template parameters}}
+  } // namespace t1
+  namespace t2 {
+    template<class A, A B = 0> struct X {};
+    // expected-error at -1 {{conflicting deduction 'C' against 'int' for parameter}}
+    template<template<class C> class TT> struct Y {};
+    // expected-note at -1 {{previous template template parameter is here}}
+    template struct Y<X>;
+    // expected-note at -1 {{has different template parameters}}
+  } // namespace t2
+} // namespace nttp_inconsistent
diff --git a/clang/test/SemaTemplate/temp_arg_template.cpp b/clang/test/SemaTemplate/temp_arg_template.cpp
index c9576e2057e53..73fa57beebe11 100644
--- a/clang/test/SemaTemplate/temp_arg_template.cpp
+++ b/clang/test/SemaTemplate/temp_arg_template.cpp
@@ -168,3 +168,26 @@ namespace PR10147 {
   template<template<typename...> class A> void f(A<int>*) { A<> a; } // expected-warning 0-1{{extension}}
   void g() { f((A<>*)0); }
 }
+
+#if __cplusplus >= 201703L
+namespace multiple_conversions {
+  constexpr int g = 1;
+  struct Z {
+      constexpr operator const int&() const { return g; }
+      constexpr operator int() { return 2; }
+  } z;
+
+  template<template<const int&> class TT> struct A {
+    static constexpr int value = TT<z>::value;
+  };
+
+  template<int I> struct B {
+    static constexpr int value = I;
+  };
+  // FIXME: This should probably convert z to (const int &) first, then
+  // convert that to int.
+  static_assert(A<B>::value == 1);
+  // cxx17-error at -1 {{static assertion failed}}
+  // cxx17-note at -2 {{expression evaluates to '2 == 1'}}
+} // namespace multiple_conversions
+#endif

>From 9223a890c392131a887e33a00f1c8e01d906d546 Mon Sep 17 00:00:00 2001
From: Kareem Ergawy <kareem.ergawy at amd.com>
Date: Tue, 23 Sep 2025 07:56:16 +0200
Subject: [PATCH 07/42] [flang][OpenMP] `do concurrent`: support `reduce` on
 device (#156610)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Extends `do concurrent` device mapping by adding support for mapping
`reduce` specifiers to OpenMP `reduction` clauses. The changes attach
two `reduction` clauses to the mapped OpenMP construct: one on the
`teams` part of the construct and one on the `wsloop` part (see the
sketch after the PR list below).

- https://github.com/llvm/llvm-project/pull/155754
- https://github.com/llvm/llvm-project/pull/155987
- https://github.com/llvm/llvm-project/pull/155992
- https://github.com/llvm/llvm-project/pull/155993
- https://github.com/llvm/llvm-project/pull/157638
- https://github.com/llvm/llvm-project/pull/156610 ◀️
- https://github.com/llvm/llvm-project/pull/156837
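
A rough sketch of how the two clauses are chained (an illustrative helper, not the verbatim
`genTeamsOp` code): after the `omp.teams` entry block is built with one block argument per reduce
variable, each reduce variable is remapped to its teams argument, so the `omp.wsloop` reduction
clause generated later through the same mapper refers to that argument.

#include "flang/Optimizer/Dialect/FIROps.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/IRMapping.h"

// Illustrative helper: wire the teams-level reduction args into the mapper so
// the wsloop-level reduction clause picks them up.
static void wireTeamsReductionArgs(mlir::IRMapping &mapper,
                                   fir::DoConcurrentLoopOp loop,
                                   mlir::omp::TeamsOp teamsOp) {
  // The teams entry block carries exactly one argument per reduction var.
  for (auto [loopVar, teamsArg] : llvm::zip_equal(
           loop.getReduceVars(), teamsOp.getRegion().getArguments()))
    mapper.map(loopVar, teamsArg);
}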
---
 .../OpenMP/DoConcurrentConversion.cpp         | 117 ++++++++++--------
 .../DoConcurrent/reduce_device.mlir           |  53 ++++++++
 2 files changed, 121 insertions(+), 49 deletions(-)
 create mode 100644 flang/test/Transforms/DoConcurrent/reduce_device.mlir

diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
index fb99623128621..03ff16366a9d2 100644
--- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
+++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
@@ -141,6 +141,9 @@ void collectLoopLiveIns(fir::DoConcurrentLoopOp loop,
 
   for (mlir::Value local : loop.getLocalVars())
     liveIns.push_back(local);
+
+  for (mlir::Value reduce : loop.getReduceVars())
+    liveIns.push_back(reduce);
 }
 
 /// Collects values that are local to a loop: "loop-local values". A loop-local
@@ -319,7 +322,7 @@ class DoConcurrentConversion
       targetOp =
           genTargetOp(doLoop.getLoc(), rewriter, mapper, loopNestLiveIns,
                       targetClauseOps, loopNestClauseOps, liveInShapeInfoMap);
-      genTeamsOp(doLoop.getLoc(), rewriter);
+      genTeamsOp(rewriter, loop, mapper);
     }
 
     mlir::omp::ParallelOp parallelOp =
@@ -492,46 +495,7 @@ class DoConcurrentConversion
     if (!mapToDevice)
       genPrivatizers(rewriter, mapper, loop, wsloopClauseOps);
 
-    if (!loop.getReduceVars().empty()) {
-      for (auto [op, byRef, sym, arg] : llvm::zip_equal(
-               loop.getReduceVars(), loop.getReduceByrefAttr().asArrayRef(),
-               loop.getReduceSymsAttr().getAsRange<mlir::SymbolRefAttr>(),
-               loop.getRegionReduceArgs())) {
-        auto firReducer = moduleSymbolTable.lookup<fir::DeclareReductionOp>(
-            sym.getLeafReference());
-
-        mlir::OpBuilder::InsertionGuard guard(rewriter);
-        rewriter.setInsertionPointAfter(firReducer);
-        std::string ompReducerName = sym.getLeafReference().str() + ".omp";
-
-        auto ompReducer =
-            moduleSymbolTable.lookup<mlir::omp::DeclareReductionOp>(
-                rewriter.getStringAttr(ompReducerName));
-
-        if (!ompReducer) {
-          ompReducer = mlir::omp::DeclareReductionOp::create(
-              rewriter, firReducer.getLoc(), ompReducerName,
-              firReducer.getTypeAttr().getValue());
-
-          cloneFIRRegionToOMP(rewriter, firReducer.getAllocRegion(),
-                              ompReducer.getAllocRegion());
-          cloneFIRRegionToOMP(rewriter, firReducer.getInitializerRegion(),
-                              ompReducer.getInitializerRegion());
-          cloneFIRRegionToOMP(rewriter, firReducer.getReductionRegion(),
-                              ompReducer.getReductionRegion());
-          cloneFIRRegionToOMP(rewriter, firReducer.getAtomicReductionRegion(),
-                              ompReducer.getAtomicReductionRegion());
-          cloneFIRRegionToOMP(rewriter, firReducer.getCleanupRegion(),
-                              ompReducer.getCleanupRegion());
-          moduleSymbolTable.insert(ompReducer);
-        }
-
-        wsloopClauseOps.reductionVars.push_back(op);
-        wsloopClauseOps.reductionByref.push_back(byRef);
-        wsloopClauseOps.reductionSyms.push_back(
-            mlir::SymbolRefAttr::get(ompReducer));
-      }
-    }
+    genReductions(rewriter, mapper, loop, wsloopClauseOps);
 
     auto wsloopOp =
         mlir::omp::WsloopOp::create(rewriter, loop.getLoc(), wsloopClauseOps);
@@ -553,8 +517,6 @@ class DoConcurrentConversion
 
     rewriter.setInsertionPointToEnd(&loopNestOp.getRegion().back());
     mlir::omp::YieldOp::create(rewriter, loop->getLoc());
-    loop->getParentOfType<mlir::ModuleOp>().print(
-        llvm::errs(), mlir::OpPrintingFlags().assumeVerified());
 
     return {loopNestOp, wsloopOp};
   }
@@ -778,15 +740,26 @@ class DoConcurrentConversion
                                             liveInName, shape);
   }
 
-  mlir::omp::TeamsOp
-  genTeamsOp(mlir::Location loc,
-             mlir::ConversionPatternRewriter &rewriter) const {
-    auto teamsOp = rewriter.create<mlir::omp::TeamsOp>(
-        loc, /*clauses=*/mlir::omp::TeamsOperands{});
+  mlir::omp::TeamsOp genTeamsOp(mlir::ConversionPatternRewriter &rewriter,
+                                fir::DoConcurrentLoopOp loop,
+                                mlir::IRMapping &mapper) const {
+    mlir::omp::TeamsOperands teamsOps;
+    genReductions(rewriter, mapper, loop, teamsOps);
+
+    mlir::Location loc = loop.getLoc();
+    auto teamsOp = rewriter.create<mlir::omp::TeamsOp>(loc, teamsOps);
+    Fortran::common::openmp::EntryBlockArgs teamsArgs;
+    teamsArgs.reduction.vars = teamsOps.reductionVars;
+    Fortran::common::openmp::genEntryBlock(rewriter, teamsArgs,
+                                           teamsOp.getRegion());
 
-    rewriter.createBlock(&teamsOp.getRegion());
     rewriter.setInsertionPoint(rewriter.create<mlir::omp::TerminatorOp>(loc));
 
+    for (auto [loopVar, teamsArg] : llvm::zip_equal(
+             loop.getReduceVars(), teamsOp.getRegion().getArguments())) {
+      mapper.map(loopVar, teamsArg);
+    }
+
     return teamsOp;
   }
 
@@ -861,6 +834,52 @@ class DoConcurrentConversion
       }
   }
 
+  void genReductions(mlir::ConversionPatternRewriter &rewriter,
+                     mlir::IRMapping &mapper, fir::DoConcurrentLoopOp loop,
+                     mlir::omp::ReductionClauseOps &reductionClauseOps) const {
+    if (!loop.getReduceVars().empty()) {
+      for (auto [var, byRef, sym, arg] : llvm::zip_equal(
+               loop.getReduceVars(), loop.getReduceByrefAttr().asArrayRef(),
+               loop.getReduceSymsAttr().getAsRange<mlir::SymbolRefAttr>(),
+               loop.getRegionReduceArgs())) {
+        auto firReducer = moduleSymbolTable.lookup<fir::DeclareReductionOp>(
+            sym.getLeafReference());
+
+        mlir::OpBuilder::InsertionGuard guard(rewriter);
+        rewriter.setInsertionPointAfter(firReducer);
+        std::string ompReducerName = sym.getLeafReference().str() + ".omp";
+
+        auto ompReducer =
+            moduleSymbolTable.lookup<mlir::omp::DeclareReductionOp>(
+                rewriter.getStringAttr(ompReducerName));
+
+        if (!ompReducer) {
+          ompReducer = mlir::omp::DeclareReductionOp::create(
+              rewriter, firReducer.getLoc(), ompReducerName,
+              firReducer.getTypeAttr().getValue());
+
+          cloneFIRRegionToOMP(rewriter, firReducer.getAllocRegion(),
+                              ompReducer.getAllocRegion());
+          cloneFIRRegionToOMP(rewriter, firReducer.getInitializerRegion(),
+                              ompReducer.getInitializerRegion());
+          cloneFIRRegionToOMP(rewriter, firReducer.getReductionRegion(),
+                              ompReducer.getReductionRegion());
+          cloneFIRRegionToOMP(rewriter, firReducer.getAtomicReductionRegion(),
+                              ompReducer.getAtomicReductionRegion());
+          cloneFIRRegionToOMP(rewriter, firReducer.getCleanupRegion(),
+                              ompReducer.getCleanupRegion());
+          moduleSymbolTable.insert(ompReducer);
+        }
+
+        reductionClauseOps.reductionVars.push_back(
+            mapToDevice ? mapper.lookup(var) : var);
+        reductionClauseOps.reductionByref.push_back(byRef);
+        reductionClauseOps.reductionSyms.push_back(
+            mlir::SymbolRefAttr::get(ompReducer));
+      }
+    }
+  }
+
   bool mapToDevice;
   llvm::DenseSet<fir::DoConcurrentOp> &concurrentLoopsToSkip;
   mlir::SymbolTable &moduleSymbolTable;
diff --git a/flang/test/Transforms/DoConcurrent/reduce_device.mlir b/flang/test/Transforms/DoConcurrent/reduce_device.mlir
new file mode 100644
index 0000000000000..3e46692a15dca
--- /dev/null
+++ b/flang/test/Transforms/DoConcurrent/reduce_device.mlir
@@ -0,0 +1,53 @@
+// RUN: fir-opt --omp-do-concurrent-conversion="map-to=device" %s -o - | FileCheck %s
+
+fir.declare_reduction @add_reduction_f32 : f32 init {
+^bb0(%arg0: f32):
+  %cst = arith.constant 0.000000e+00 : f32
+  fir.yield(%cst : f32)
+} combiner {
+^bb0(%arg0: f32, %arg1: f32):
+  %0 = arith.addf %arg0, %arg1 fastmath<contract> : f32
+  fir.yield(%0 : f32)
+}
+
+func.func @_QPfoo() {
+  %0 = fir.dummy_scope : !fir.dscope
+  %3 = fir.alloca f32 {bindc_name = "s", uniq_name = "_QFfooEs"}
+  %4:2 = hlfir.declare %3 {uniq_name = "_QFfooEs"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+  %c1 = arith.constant 1 : index
+  %c10 = arith.constant 10 : index
+  fir.do_concurrent {
+    %7 = fir.alloca i32 {bindc_name = "i"}
+    %8:2 = hlfir.declare %7 {uniq_name = "_QFfooEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+    fir.do_concurrent.loop (%arg0) = (%c1) to (%c10) step (%c1) reduce(@add_reduction_f32 #fir.reduce_attr<add> %4#0 -> %arg1 : !fir.ref<f32>) {
+      %9 = fir.convert %arg0 : (index) -> i32
+      fir.store %9 to %8#0 : !fir.ref<i32>
+      %10:2 = hlfir.declare %arg1 {uniq_name = "_QFfooEs"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+      %11 = fir.load %10#0 : !fir.ref<f32>
+      %cst = arith.constant 1.000000e+00 : f32
+      %12 = arith.addf %11, %cst fastmath<contract> : f32
+      hlfir.assign %12 to %10#0 : f32, !fir.ref<f32>
+    }
+  }
+  return
+}
+
+// CHECK: omp.declare_reduction @[[OMP_RED:.*.omp]] : f32
+
+// CHECK: %[[S_DECL:.*]]:2 = hlfir.declare %6 {uniq_name = "_QFfooEs"}
+// CHECK: %[[S_MAP:.*]] = omp.map.info var_ptr(%[[S_DECL]]#1
+
+// CHECK: omp.target host_eval({{.*}}) map_entries({{.*}}, %[[S_MAP]] -> %[[S_TARGET_ARG:.*]] : {{.*}}) {
+// CHECK:   %[[S_DEV_DECL:.*]]:2 = hlfir.declare %[[S_TARGET_ARG]]
+// CHECK:   omp.teams reduction(@[[OMP_RED]] %[[S_DEV_DECL]]#0 -> %[[RED_TEAMS_ARG:.*]] : !fir.ref<f32>) {
+// CHECK:   omp.parallel {
+// CHECK:     omp.distribute {
+// CHECK:       omp.wsloop reduction(@[[OMP_RED]] %[[RED_TEAMS_ARG]] -> %[[RED_WS_ARG:.*]] : {{.*}}) {
+// CHECK:         %[[S_WS_DECL:.*]]:2 = hlfir.declare %[[RED_WS_ARG]] {uniq_name = "_QFfooEs"}
+// CHECK:         %[[S_VAL:.*]] = fir.load %[[S_WS_DECL]]#0
+// CHECK:         %[[RED_RES:.*]] = arith.addf %[[S_VAL]], %{{.*}} fastmath<contract> : f32
+// CHECK:         hlfir.assign %[[RED_RES]] to %[[S_WS_DECL]]#0
+// CHECK:       }
+// CHECK:     }
+// CHECK:   }
+// CHECK: }

>From 6efb0963216b7a019dfdc5bb5756229aa8d33c4e Mon Sep 17 00:00:00 2001
From: Rux124 <jhlee755 at andestech.com>
Date: Tue, 23 Sep 2025 14:03:30 +0800
Subject: [PATCH 08/42] [RISCV] Add MC layer support for Andes XAndesVSIntH
 extension. (#159514)

Add MC layer support for Andes XAndesVSIntH extension. The spec is
available at:
https://github.com/andestech/andes-v5-isa/releases/tag/ast-v5_4_0-release
---
 .../Driver/print-supported-extensions-riscv.c |  1 +
 .../riscv-target-features-andes.c             |  9 +++
 llvm/docs/RISCVUsage.rst                      |  3 +
 llvm/docs/ReleaseNotes.md                     |  1 +
 .../RISCV/Disassembler/RISCVDisassembler.cpp  |  4 +-
 llvm/lib/Target/RISCV/RISCVFeatures.td        |  8 +++
 llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td | 53 ++++++++++++++++
 llvm/test/CodeGen/RISCV/attributes-andes.ll   |  4 ++
 llvm/test/CodeGen/RISCV/features-info.ll      |  1 +
 llvm/test/MC/RISCV/xandesvsinth-valid.s       | 60 +++++++++++++++++++
 .../TargetParser/RISCVISAInfoTest.cpp         |  1 +
 11 files changed, 143 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/MC/RISCV/xandesvsinth-valid.s

diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index f5bef085c587e..c44a0e8515c03 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -165,6 +165,7 @@
 // CHECK-NEXT:     xandesvbfhcvt        5.0       'XAndesVBFHCvt' (Andes Vector BFLOAT16 Conversion Extension)
 // CHECK-NEXT:     xandesvdot           5.0       'XAndesVDot' (Andes Vector Dot Product Extension)
 // CHECK-NEXT:     xandesvpackfph       5.0       'XAndesVPackFPH' (Andes Vector Packed FP16 Extension)
+// CHECK-NEXT:     xandesvsinth         5.0       'XAndesVSIntH' (Andes Vector Small INT Handling Extension)
 // CHECK-NEXT:     xandesvsintload      5.0       'XAndesVSIntLoad' (Andes Vector INT4 Load Extension)
 // CHECK-NEXT:     xcvalu               1.0       'XCValu' (CORE-V ALU Operations)
 // CHECK-NEXT:     xcvbi                1.0       'XCVbi' (CORE-V Immediate Branching)
diff --git a/clang/test/Preprocessor/riscv-target-features-andes.c b/clang/test/Preprocessor/riscv-target-features-andes.c
index f7981bb52de6d..385d421ecb744 100644
--- a/clang/test/Preprocessor/riscv-target-features-andes.c
+++ b/clang/test/Preprocessor/riscv-target-features-andes.c
@@ -6,6 +6,7 @@
 // CHECK-NOT: __riscv_xandesperf {{.*$}}
 // CHECK-NOT: __riscv_xandesbfhcvt {{.*$}}
 // CHECK-NOT: __riscv_xandesvbfhcvt {{.*$}}
+// CHECK-NOT: __riscv_xandesvsinth {{.*$}}
 // CHECK-NOT: __riscv_xandesvsintload {{.*$}}
 // CHECK-NOT: __riscv_xandesvpackfph {{.*$}}
 // CHECK-NOT: __riscv_xandesvdot {{.*$}}
@@ -34,6 +35,14 @@
 // RUN:   -o - | FileCheck --check-prefix=CHECK-XANDESVBFHCVT %s
 // CHECK-XANDESVBFHCVT: __riscv_xandesvbfhcvt  5000000{{$}}
 
+// RUN: %clang --target=riscv32 \
+// RUN:   -march=rv32i_xandesvsinth -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-XANDESVSINTH %s
+// RUN: %clang --target=riscv64 \
+// RUN:   -march=rv64i_xandesvsinth -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-XANDESVSINTH %s
+// CHECK-XANDESVSINTH: __riscv_xandesvsinth  5000000{{$}}
+
 // RUN: %clang --target=riscv32 \
 // RUN:   -march=rv32i_xandesvsintload -E -dM %s \
 // RUN:   -o - | FileCheck --check-prefix=CHECK-XANDESVSINTLOAD %s
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index 2ea571e12a277..7b1a6ce834919 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -527,6 +527,9 @@ The current vendor extensions supported are:
 ``XAndesVBFHCvt``
   LLVM implements `version 5.0.0 of the Andes Vector BFLOAT16 Conversion Extension specification <https://github.com/andestech/andes-v5-isa/releases/download/ast-v5_4_0-release/AndeStar_V5_ISA_Spec_UM165-v1.5.08-20250317.pdf>`__ by Andes Technology. All instructions are prefixed with `nds.` as described in the specification.
 
+``XAndesVSINTH``
+  LLVM implements `version 5.0.0 of the Andes Vector Small Int Handling Extension specification <https://github.com/andestech/andes-v5-isa/releases/download/ast-v5_4_0-release/AndeStar_V5_ISA_Spec_UM165-v1.5.08-20250317.pdf>`__ by Andes Technology. All instructions are prefixed with `nds.` as described in the specification.
+
 ``XAndesVSINTLoad``
   LLVM implements `version 5.0.0 of the Andes Vector INT4 Load Extension specification <https://github.com/andestech/andes-v5-isa/releases/download/ast-v5_4_0-release/AndeStar_V5_ISA_Spec_UM165-v1.5.08-20250317.pdf>`__ by Andes Technology. All instructions are prefixed with `nds.` as described in the specification.
 
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index c211844c62491..40cddb45df84d 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -126,6 +126,7 @@ Changes to the RISC-V Backend
 * Add support for Zvfbfa (Additional BF16 vector compute support)
 * Adds experimental support for the 'Zibi` (Branch with Immediate) extension.
 * Add support for Zvfofp8min (OFP8 conversion extension)
+* Adds assembler support for the Andes `XAndesVSIntH` (Andes Vector Small INT Handling Extension).
 
 Changes to the WebAssembly Backend
 ----------------------------------
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 9f070fb2ff3e2..b8ec0bbfcd3bb 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -676,8 +676,8 @@ static constexpr FeatureBitset XTHeadGroup = {
     RISCV::FeatureVendorXTHeadVdot};
 
 static constexpr FeatureBitset XAndesGroup = {
-    RISCV::FeatureVendorXAndesPerf, RISCV::FeatureVendorXAndesBFHCvt,
-    RISCV::FeatureVendorXAndesVBFHCvt,
+    RISCV::FeatureVendorXAndesPerf,      RISCV::FeatureVendorXAndesBFHCvt,
+    RISCV::FeatureVendorXAndesVBFHCvt,   RISCV::FeatureVendorXAndesVSIntH,
     RISCV::FeatureVendorXAndesVSIntLoad, RISCV::FeatureVendorXAndesVPackFPH,
     RISCV::FeatureVendorXAndesVDot};
 
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 677d93521c6f1..a02de31d1cc4d 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1648,6 +1648,14 @@ def HasVendorXAndesVBFHCvt
       AssemblerPredicate<(all_of FeatureVendorXAndesVBFHCvt),
                          "'XAndesVBFHCvt' (Andes Vector BFLOAT16 Conversion Extension)">;
 
+def FeatureVendorXAndesVSIntH
+    : RISCVExtension<5, 0, "Andes Vector Small INT Handling Extension",
+                     [FeatureStdExtZve32x]>;
+def HasVendorXAndesVSIntH
+    : Predicate<"Subtarget->hasVendorXAndesVSIntH()">,
+      AssemblerPredicate<(all_of FeatureVendorXAndesVSIntH),
+                         "'XAndesVSIntH' (Andes Vector Small INT Handling Extension)">;
+
 def FeatureVendorXAndesVSIntLoad
     : RISCVExtension<5, 0, "Andes Vector INT4 Load Extension",
                      [FeatureStdExtZve32x]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index 1fb30a0b73d92..9835c033aea9c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -362,6 +362,47 @@ class NDSRVInstSDGP<bits<3> funct3, string opcodestr>
   let mayStore = 1;
 }
 
+class NDSRVInstVSINTLN<bits<5> funct5, string opcodestr>
+    : RVInst<(outs VR:$vd), (ins GPRMemZeroOffset:$rs1),
+             opcodestr, "$vd, ${rs1}", [], InstFormatR>,
+      VLESchedMC {
+  bits<5> rs1;
+  bits<5> vd;
+
+  let Inst{31-26} = 0b000001;
+  let Inst{25} = 1;
+  let Inst{24-20} = funct5;
+  let Inst{19-15} = rs1;
+  let Inst{14-12} = 0b100;
+  let Inst{11-7} = vd;
+  let Inst{6-0} = OPC_CUSTOM_2.Value;
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+  let mayStore = 0;
+  let Uses = [VTYPE, VL];
+}
+
+class NDSRVInstVSINTCvt<bits<5> funct5, string opcodestr>
+    : RVInst<(outs VR:$vd), (ins VR:$vs, VMaskOp:$vm),
+             opcodestr, "$vd, $vs$vm", [], InstFormatR> {
+  bits<5> vs;
+  bits<5> vd;
+  bit vm;
+
+  let Inst{31-26} = 0b000000;
+  let Inst{25} = vm;
+  let Inst{24-20} = vs;
+  let Inst{19-15} = funct5;
+  let Inst{14-12} = 0b100;
+  let Inst{11-7} = vd;
+  let Inst{6-0} = OPC_CUSTOM_2.Value;
+  let hasSideEffects = 0;
+  let mayLoad = 0;
+  let mayStore = 0;
+  let Uses = [FRM, VL, VTYPE];
+  let RVVConstraint = VMConstraint;
+}
+
 class NDSRVInstBFHCvt<bits<7> funct7, bits<5> rs1val, DAGOperand rdty,
                       DAGOperand rs2ty, string opcodestr>
     : RVInstR<funct7, 0b100, OPC_CUSTOM_2, (outs rdty:$rd),
@@ -679,6 +720,18 @@ let Uses = [FRM, VL, VTYPE] in
 def NDS_VFNCVT_BF16_S : NDSRVInstVBFHCvt<0b00001, "nds.vfncvt.bf16.s">;
 }
 
+//===----------------------------------------------------------------------===//
+// XAndesVSIntH
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasVendorXAndesVSIntH] in {
+  def NDS_VFWCVT_F_N  : NDSRVInstVSINTCvt<0b00100, "nds.vfwcvt.f.n.v">;
+  def NDS_VFWCVT_F_NU : NDSRVInstVSINTCvt<0b00101, "nds.vfwcvt.f.nu.v">;
+  def NDS_VFWCVT_F_B  : NDSRVInstVSINTCvt<0b00110, "nds.vfwcvt.f.b.v">;
+  def NDS_VFWCVT_F_BU : NDSRVInstVSINTCvt<0b00111, "nds.vfwcvt.f.bu.v">;
+  def NDS_VLE4_V : NDSRVInstVSINTLN<0b00000, "nds.vle4.v">;
+}
+
 //===----------------------------------------------------------------------===//
 // XAndesVSIntLoad
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/attributes-andes.ll b/llvm/test/CodeGen/RISCV/attributes-andes.ll
index ed27a9255a86f..fc2b1b123af51 100644
--- a/llvm/test/CodeGen/RISCV/attributes-andes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes-andes.ll
@@ -3,6 +3,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesperf %s -o - | FileCheck --check-prefix=RV32XANDESPERF %s
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesbfhcvt %s -o - | FileCheck --check-prefix=RV32XANDESBFHCVT %s
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesvbfhcvt %s -o - | FileCheck --check-prefix=RV32XANDESVBFHCVT %s
+; RUN: llc -mtriple=riscv32 -mattr=+xandesvsinth %s -o - | FileCheck --check-prefix=RV32XANDESVSINTH %s
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesvsintload %s -o - | FileCheck --check-prefix=RV32XANDESVSINTLOAD %s
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesvdot %s -o - | FileCheck --check-prefix=RV32XANDESVDOT %s
 ; RUN: llc -mtriple=riscv32 -mattr=+xandesvpackfph %s -o - | FileCheck --check-prefix=RV32XANDESVPACKFPH %s
@@ -10,6 +11,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesperf %s -o - | FileCheck --check-prefix=RV64XANDESPERF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesbfhcvt %s -o - | FileCheck --check-prefix=RV64XANDESBFHCVT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesvbfhcvt %s -o - | FileCheck --check-prefix=RV64XANDESVBFHCVT %s
+; RUN: llc -mtriple=riscv64 -mattr=+xandesvsinth %s -o - | FileCheck --check-prefix=RV64XANDESVSINTH %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesvsintload %s -o - | FileCheck --check-prefix=RV64XANDESVSINTLOAD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesvdot %s -o - | FileCheck --check-prefix=RV64XANDESVDOT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xandesvpackfph %s -o - | FileCheck --check-prefix=RV64XANDESVPACKFPH %s
@@ -17,6 +19,7 @@
 ; RV32XANDESPERF: .attribute 5, "rv32i2p1_xandesperf5p0"
 ; RV32XANDESBFHCVT: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_xandesbfhcvt5p0"
 ; RV32XANDESVBFHCVT: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvl32b1p0_xandesvbfhcvt5p0"
+; RV32XANDESVSINTH: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvsinth5p0"
 ; RV32XANDESVSINTLOAD: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvsintload5p0"
 ; RV32XANDESVDOT: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvdot5p0"
 ; RV32XANDESVPACKFPH: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_xandesvpackfph5p0"
@@ -24,6 +27,7 @@
 ; RV64XANDESPERF: .attribute 5, "rv64i2p1_xandesperf5p0"
 ; RV64XANDESBFHCVT: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_xandesbfhcvt5p0"
 ; RV64XANDESVBFHCVT: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvl32b1p0_xandesvbfhcvt5p0"
+; RV64XANDESVSINTH: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvsinth5p0"
 ; RV64XANDESVSINTLOAD: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvsintload5p0"
 ; RV64XANDESVDOT: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_xandesvdot5p0"
 ; RV64XANDESVPACKFPH: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_xandesvpackfph5p0"
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index fc77d6cb7c7be..1a7a72d3e072b 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -191,6 +191,7 @@
 ; CHECK-NEXT:   xandesvbfhcvt                    - 'XAndesVBFHCvt' (Andes Vector BFLOAT16 Conversion Extension).
 ; CHECK-NEXT:   xandesvdot                       - 'XAndesVDot' (Andes Vector Dot Product Extension).
 ; CHECK-NEXT:   xandesvpackfph                   - 'XAndesVPackFPH' (Andes Vector Packed FP16 Extension).
+; CHECK-NEXT:   xandesvsinth                     - 'XAndesVSIntH' (Andes Vector Small INT Handling Extension).
 ; CHECK-NEXT:   xandesvsintload                  - 'XAndesVSIntLoad' (Andes Vector INT4 Load Extension).
 ; CHECK-NEXT:   xcvalu                           - 'XCValu' (CORE-V ALU Operations).
 ; CHECK-NEXT:   xcvbi                            - 'XCVbi' (CORE-V Immediate Branching).
diff --git a/llvm/test/MC/RISCV/xandesvsinth-valid.s b/llvm/test/MC/RISCV/xandesvsinth-valid.s
new file mode 100644
index 0000000000000..387bb116fe86f
--- /dev/null
+++ b/llvm/test/MC/RISCV/xandesvsinth-valid.s
@@ -0,0 +1,60 @@
+# XAndesVSIntH - Andes Vector Small INT Handling Extension
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+xandesvsinth -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM %s
+# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+xandesvsinth < %s \
+# RUN:     | llvm-objdump --mattr=+xandesvsinth -M no-aliases -d -r - \
+# RUN:     | FileCheck -check-prefixes=CHECK-OBJ %s
+# RUN: not llvm-mc -triple=riscv32 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+xandesvsinth -show-encoding \
+# RUN:     | FileCheck -check-prefixes=CHECK-ASM %s
+# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+xandesvsinth < %s \
+# RUN:     | llvm-objdump --mattr=+xandesvsinth -M no-aliases -d -r - \
+# RUN:     | FileCheck -check-prefixes=CHECK-OBJ %s
+# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# CHECK-OBJ: nds.vfwcvt.f.n.v  v8, v10
+# CHECK-ASM: nds.vfwcvt.f.n.v  v8, v10
+# CHECK-ASM: encoding: [0x5b,0x44,0xa2,0x02]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.n.v v8, v10
+# CHECK-OBJ: nds.vfwcvt.f.n.v  v8, v10, v0.t
+# CHECK-ASM: nds.vfwcvt.f.n.v  v8, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0x44,0xa2,0x00]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.n.v v8, v10, v0.t
+# CHECK-OBJ: nds.vfwcvt.f.nu.v v8, v10
+# CHECK-ASM: nds.vfwcvt.f.nu.v v8, v10
+# CHECK-ASM: encoding: [0x5b,0xc4,0xa2,0x02]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.nu.v v8, v10
+# CHECK-OBJ: nds.vfwcvt.f.nu.v v8, v10, v0.t
+# CHECK-ASM: nds.vfwcvt.f.nu.v v8, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0xc4,0xa2,0x00]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.nu.v v8, v10, v0.t
+# CHECK-OBJ: nds.vfwcvt.f.b.v  v8, v10
+# CHECK-ASM: nds.vfwcvt.f.b.v  v8, v10
+# CHECK-ASM: encoding: [0x5b,0x44,0xa3,0x02]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.b.v v8, v10
+# CHECK-OBJ: nds.vfwcvt.f.b.v  v8, v10, v0.t
+# CHECK-ASM: nds.vfwcvt.f.b.v  v8, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0x44,0xa3,0x00]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.b.v v8, v10, v0.t
+# CHECK-OBJ: nds.vfwcvt.f.bu.v v8, v10
+# CHECK-ASM: nds.vfwcvt.f.bu.v v8, v10
+# CHECK-ASM: encoding: [0x5b,0xc4,0xa3,0x02]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.bu.v v8, v10
+# CHECK-OBJ: nds.vfwcvt.f.bu.v v8, v10, v0.t
+# CHECK-ASM: nds.vfwcvt.f.bu.v v8, v10, v0.t
+# CHECK-ASM: encoding: [0x5b,0xc4,0xa3,0x00]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vfwcvt.f.bu.v v8, v10, v0.t
+# CHECK-OBJ: nds.vle4.v      v8, (a0)
+# CHECK-ASM: nds.vle4.v      v8, (a0)
+# CHECK-ASM: encoding: [0x5b,0x44,0x05,0x06]
+# CHECK-ERROR: instruction requires the following: 'XAndesVSIntH' (Andes Vector Small INT Handling Extension){{$}}
+nds.vle4.v v8, (a0)
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index b8efab6399779..5c6c824dadd7d 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1137,6 +1137,7 @@ R"(All available -march extensions for RISC-V
     xandesvbfhcvt        5.0
     xandesvdot           5.0
     xandesvpackfph       5.0
+    xandesvsinth         5.0
     xandesvsintload      5.0
     xcvalu               1.0
     xcvbi                1.0

>From 63ac83c26e9eae9d71cd65b67e7ae0fc537b13dc Mon Sep 17 00:00:00 2001
From: Kareem Ergawy <kareem.ergawy at amd.com>
Date: Tue, 23 Sep 2025 08:32:31 +0200
Subject: [PATCH 09/42] [flang][OpenMP] Support multi-block reduction combiner 
 regions on the GPU (#156837)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes a bug related to insertion points when inlining multi-block
combiner reduction regions. The insertion point returned at the end of
the inlined region was not used, so the follow-up IR was emitted into a
stale block, producing basic blocks with multiple terminators.
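
A minimal sketch of the corrected pattern (a fragment mirroring the
createReductionsGPU change below, reusing the existing OpenMPIRBuilder
names rather than forming a self-contained example):

  // The combiner callback can create extra basic blocks, so resume at the
  // insertion point it returns before emitting the final store. Without
  // restoreIP, the store is appended to whatever block the builder was left
  // on, which may already have a terminator.
  OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
      RI.ReductionGen(Builder.saveIP(), RHSValue, LHSValue, Reduced);
  if (!AfterIP)
    return AfterIP.takeError();
  Builder.restoreIP(*AfterIP); // continue in the combiner's exit block
  Builder.CreateStore(Reduced, LHS);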

PR stack:
- https://github.com/llvm/llvm-project/pull/155754
- https://github.com/llvm/llvm-project/pull/155987
- https://github.com/llvm/llvm-project/pull/155992
- https://github.com/llvm/llvm-project/pull/155993
- https://github.com/llvm/llvm-project/pull/157638
- https://github.com/llvm/llvm-project/pull/156610
- https://github.com/llvm/llvm-project/pull/156837 ◀️
---
 llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp     |  3 +
 .../omptarget-multi-block-reduction.mlir      | 87 +++++++++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 mlir/test/Target/LLVMIR/omptarget-multi-block-reduction.mlir

diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 220eee3cb8b08..6d948f184392d 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -3507,6 +3507,8 @@ Expected<Function *> OpenMPIRBuilder::createReductionFunction(
         return AfterIP.takeError();
       if (!Builder.GetInsertBlock())
         return ReductionFunc;
+
+      Builder.restoreIP(*AfterIP);
       Builder.CreateStore(Reduced, LHSPtr);
     }
   }
@@ -3751,6 +3753,7 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createReductionsGPU(
           RI.ReductionGen(Builder.saveIP(), RHSValue, LHSValue, Reduced);
       if (!AfterIP)
         return AfterIP.takeError();
+      Builder.restoreIP(*AfterIP);
       Builder.CreateStore(Reduced, LHS, false);
     }
   }
diff --git a/mlir/test/Target/LLVMIR/omptarget-multi-block-reduction.mlir b/mlir/test/Target/LLVMIR/omptarget-multi-block-reduction.mlir
new file mode 100644
index 0000000000000..87ff0ba786648
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/omptarget-multi-block-reduction.mlir
@@ -0,0 +1,87 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+// Verifies that the IR builder can handle reductions with multi-block combiner
+// regions on the GPU.
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<"dlti.alloca_memory_space" = 5 : ui64, "dlti.global_memory_space" = 1 : ui64>, llvm.target_triple = "amdgcn-amd-amdhsa", omp.is_gpu = true, omp.is_target_device = true} {
+  llvm.func @bar() {}
+  llvm.func @baz() {}
+
+  omp.declare_reduction @add_reduction_byref_box_5xf32 : !llvm.ptr alloc {
+    %0 = llvm.mlir.constant(1 : i64) : i64
+    %1 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> : (i64) -> !llvm.ptr<5>
+    %2 = llvm.addrspacecast %1 : !llvm.ptr<5> to !llvm.ptr
+    omp.yield(%2 : !llvm.ptr)
+  } init {
+  ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+    omp.yield(%arg1 : !llvm.ptr)
+  } combiner {
+  ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
+    llvm.call @bar() : () -> ()
+    llvm.br ^bb3
+
+  ^bb3:  // pred: ^bb1
+    llvm.call @baz() : () -> ()
+    omp.yield(%arg0 : !llvm.ptr)
+  }
+  llvm.func @foo_() {
+    %c1 = llvm.mlir.constant(1 : i64) : i64
+    %10 = llvm.alloca %c1 x !llvm.array<5 x f32> {bindc_name = "x"} : (i64) -> !llvm.ptr<5>
+    %11 = llvm.addrspacecast %10 : !llvm.ptr<5> to !llvm.ptr
+    %74 = omp.map.info var_ptr(%11 : !llvm.ptr, !llvm.array<5 x f32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = "x"}
+    omp.target map_entries(%74 -> %arg0 : !llvm.ptr) {
+      %c1_2 = llvm.mlir.constant(1 : i32) : i32
+      %c10 = llvm.mlir.constant(10 : i32) : i32
+      omp.teams reduction(byref @add_reduction_byref_box_5xf32 %arg0 -> %arg2 : !llvm.ptr) {
+        omp.parallel {
+          omp.distribute {
+            omp.wsloop {
+              omp.loop_nest (%arg5) : i32 = (%c1_2) to (%c10) inclusive step (%c1_2) {
+                omp.yield
+              }
+            } {omp.composite}
+          } {omp.composite}
+          omp.terminator
+        } {omp.composite}
+        omp.terminator
+      }
+      omp.terminator
+    }
+    llvm.return
+  }
+}
+
+// CHECK:      call void @__kmpc_parallel_51({{.*}}, i32 1, i32 -1, i32 -1,
+// CHECK-SAME:   ptr @[[PAR_OUTLINED:.*]], ptr null, ptr %2, i64 1)
+
+// CHECK: define internal void @[[PAR_OUTLINED]]{{.*}} {
+// CHECK:   .omp.reduction.then:
+// CHECK:     br label %omp.reduction.nonatomic.body
+
+// CHECK:   omp.reduction.nonatomic.body:
+// CHECK:     call void @bar()
+// CHECK:     br label %[[BODY_2ND_BB:.*]]
+
+// CHECK:   [[BODY_2ND_BB]]:
+// CHECK:     call void @baz()
+// CHECK:     br label %[[CONT_BB:.*]]
+
+// CHECK:   [[CONT_BB]]:
+// CHECK-NEXT: %[[RED_RHS:.*]] = phi ptr [ %final.rhs, %{{.*}} ]
+// CHECK-NEXT: store ptr %[[RED_RHS]], ptr %{{.*}}, align 8
+// CHECK-NEXT: br label %.omp.reduction.done
+// CHECK: }
+
+// CHECK: define internal void @"{{.*}}$reduction$reduction_func"(ptr noundef %0, ptr noundef %1) #0 {
+// CHECK:     br label %omp.reduction.nonatomic.body
+
+// CHECK:   [[BODY_2ND_BB:.*]]:
+// CHECK:     call void @baz()
+// CHECK:     br label %omp.region.cont
+
+
+// CHECK: omp.reduction.nonatomic.body:
+// CHECK:   call void @bar()
+// CHECK:     br label %[[BODY_2ND_BB]]
+
+// CHECK: }

>From d20791fcbb90f77fb6ec2d9c10bc7c5592d7453b Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Tue, 23 Sep 2025 09:07:51 +0200
Subject: [PATCH 10/42] [clang][bytecode] Print dummy-status of global
 variables (#160240)

in Program::dump().
---
 clang/lib/AST/ByteCode/Disasm.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/clang/lib/AST/ByteCode/Disasm.cpp b/clang/lib/AST/ByteCode/Disasm.cpp
index ab3b9f7c3b1d7..fd0903f2e652c 100644
--- a/clang/lib/AST/ByteCode/Disasm.cpp
+++ b/clang/lib/AST/ByteCode/Disasm.cpp
@@ -323,6 +323,8 @@ LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
                         : TerminalColor{llvm::raw_ostream::RED, false});
       OS << (GP.isInitialized() ? "initialized " : "uninitialized ");
     }
+    if (GP.block()->isDummy())
+      OS << "dummy ";
     Desc->dump(OS);
 
     if (GP.isInitialized() && Desc->IsTemporary) {

>From 5a5cf4841bd99be41ec60a2179d1d225abc87c8f Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Tue, 23 Sep 2025 08:18:46 +0100
Subject: [PATCH 11/42] [AArch64] Scalarize extracted vector loads. (#159714)

Given a vector load that is only extracted from, it is more efficient to
perform the individual scalar loads than a single vector load followed by
many extracts. This adds a late optimization for scalarizing extracted
vector loads that do not have any other uses and will not be more
efficiently kept in FPR registers.
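
For context, a minimal source-level illustration of the kind of pattern this
targets (a hypothetical example written with the Clang/GCC vector extension,
not taken from the test suite; the exact codegen depends on the subtarget
and optimization level):

  // 'v' is loaded as a whole vector but only read lane by lane, so the
  // backend can load each lane directly into an x register instead of doing
  // a q-register load followed by cross-register-bank lane moves.
  typedef long long v2i64 __attribute__((vector_size(16)));

  long long sum_lanes(const v2i64 *p) {
    v2i64 v = *p;       // vector load whose only uses are lane extracts
    return v[0] + v[1]; // lanes 0 and 1 extracted as scalars
  }

---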
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  63 ++
 .../CodeGen/AArch64/arm64-convert-v4f64.ll    |  17 +-
 .../AArch64/arm64-i16-subreg-extract.ll       |   6 +-
 .../test/CodeGen/AArch64/arm64-ldp-cluster.ll |  14 +-
 .../test/CodeGen/AArch64/complex-int-to-fp.ll |   8 +-
 .../CodeGen/AArch64/extract-vector-elt.ll     |  14 +-
 llvm/test/CodeGen/AArch64/itofp-bf16.ll       | 570 +++++++++---------
 .../AArch64/ragreedy-local-interval-cost.ll   | 343 +++++------
 .../CodeGen/AArch64/scalarize-vector-load.ll  | 394 +++++-------
 .../AArch64/sme-streaming-interface.ll        |   4 +-
 .../AArch64/sve-fixed-length-ext-loads.ll     |   4 +-
 .../AArch64/sve-fixed-length-masked-gather.ll |   3 +-
 .../AArch64/sve-fixed-length-splat-vector.ll  |  16 +-
 ...e-streaming-mode-fixed-length-ext-loads.ll |  16 +-
 ...-streaming-mode-fixed-length-ld2-alloca.ll |   4 +-
 ...eaming-mode-fixed-length-vector-shuffle.ll |  64 +-
 llvm/test/CodeGen/AArch64/vector-compress.ll  |   7 +-
 17 files changed, 741 insertions(+), 806 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cd7f0e719ad0c..09b31616e0882 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -20467,6 +20467,69 @@ performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
     }
   }
 
+  // Given an extract(load) or extract(extend(load)), produce a scalar load
+  // instead to avoid the cross-register-bank copies.
+  if (DCI.isAfterLegalizeDAG() && Subtarget->isLittleEndian() &&
+      VT.isInteger() && isa<ConstantSDNode>(N1)) {
+    SDValue LoadN0 = N0;
+    // Look through sext/zext and extract_subvector / insert_subvector if
+    // required.
+    if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
+         N0.getOpcode() == ISD::SIGN_EXTEND ||
+         N0.getOpcode() == ISD::ANY_EXTEND) &&
+        N0.getOperand(0).hasOneUse())
+      LoadN0 = N0.getOperand(0);
+    unsigned OffsetElts = 0;
+    if (LoadN0.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
+      OffsetElts = LoadN0.getConstantOperandVal(1);
+      LoadN0 = LoadN0.getOperand(0);
+    }
+    if (LoadN0.getOpcode() == ISD::INSERT_SUBVECTOR &&
+        LoadN0.getOperand(0).isUndef() &&
+        isNullConstant(LoadN0.getOperand(2)) &&
+        LoadN0.getOperand(1).hasOneUse())
+      LoadN0 = LoadN0.getOperand(1);
+
+    // Check all the uses are valid and can be scalarized. We check that all the
+    // uses are extracts and those extracts are not re-inserted into an
+    // operation best treated as a vector register.
+    auto Load = dyn_cast<LoadSDNode>(LoadN0);
+    if (Load && Load->isSimple() && ISD::isNormalLoad(Load) &&
+        Load->getMemoryVT().isByteSized() &&
+        all_of(N0->uses(), [&](const SDUse &U) {
+          return U.getResNo() != N0.getResNo() ||
+                 (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+                  !any_of(U.getUser()->uses(), [](const SDUse &U2) {
+                    return U2.getUser()->getOpcode() ==
+                               ISD::INSERT_VECTOR_ELT ||
+                           U2.getUser()->getOpcode() == ISD::BUILD_VECTOR ||
+                           U2.getUser()->getOpcode() == ISD::SCALAR_TO_VECTOR;
+                  }));
+        })) {
+
+      SDLoc DL(Load);
+
+      // Generate a new scalar load.
+      unsigned Offset = (OffsetElts + N->getConstantOperandVal(1)) *
+                        Load->getValueType(0).getScalarSizeInBits() / 8;
+      SDValue BasePtr = DAG.getObjectPtrOffset(
+          DL, Load->getBasePtr(), DAG.getConstant(Offset, DL, MVT::i64));
+      ISD::LoadExtType ExtType =
+          N0.getOpcode() == ISD::ZERO_EXTEND
+              ? ISD::ZEXTLOAD
+              : (N0.getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD
+                                                    : ISD::EXTLOAD);
+      SDValue ScalarLoad =
+          DAG.getExtLoad(ExtType, DL, VT, Load->getChain(), BasePtr,
+                         Load->getPointerInfo().getWithOffset(Offset),
+                         Load->getValueType(0).getScalarType(),
+                         commonAlignment(Load->getAlign(), Offset),
+                         Load->getMemOperand()->getFlags(), Load->getAAInfo());
+      DAG.makeEquivalentMemoryOrdering(Load, ScalarLoad);
+      return ScalarLoad;
+    }
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index 2b9e334cc7812..2b313fa8ce55f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -53,18 +53,15 @@ define <4 x half> @uitofp_v4i64_to_v4f16(ptr %ptr) {
 define <4 x bfloat> @uitofp_v4i64_to_v4bf16(ptr %ptr) {
 ; CHECK-LABEL: uitofp_v4i64_to_v4bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q2, [x0]
-; CHECK-NEXT:    mov x8, v0.d[1]
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    ucvtf s1, x9
-; CHECK-NEXT:    mov x9, v2.d[1]
-; CHECK-NEXT:    ucvtf s0, x8
-; CHECK-NEXT:    fmov x8, d2
-; CHECK-NEXT:    ucvtf s2, x8
+; CHECK-NEXT:    ldp x8, x9, [x0]
+; CHECK-NEXT:    movi v2.4s, #127, msl #8
+; CHECK-NEXT:    ucvtf s0, x9
+; CHECK-NEXT:    ucvtf s1, x8
+; CHECK-NEXT:    ldp x8, x9, [x0, #16]
 ; CHECK-NEXT:    mov v1.s[1], v0.s[0]
+; CHECK-NEXT:    ucvtf s0, x8
+; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    ucvtf s0, x9
-; CHECK-NEXT:    mov v1.s[2], v2.s[0]
-; CHECK-NEXT:    movi v2.4s, #127, msl #8
 ; CHECK-NEXT:    mov v1.s[3], v0.s[0]
 ; CHECK-NEXT:    movi v0.4s, #1
 ; CHECK-NEXT:    ushr v3.4s, v1.4s, #16
diff --git a/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll b/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
index 59f887a1143c0..a93203793307a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
@@ -4,10 +4,8 @@
 define i32 @foo(ptr %__a) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    umov.h w8, v0[0]
-; CHECK-NEXT:    umov.h w9, v0[0]
-; CHECK-NEXT:    add w0, w9, w8, uxth #1
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    add w0, w8, w8, lsl #1
 ; CHECK-NEXT:    ret
   %tmp18 = load <4 x i16>, ptr %__a, align 8
   %vget_lane = extractelement <4 x i16> %tmp18, i32 0
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index 114203e46f196..13093cb2204ce 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -105,13 +105,13 @@ define i32 @ldr_int_volatile(ptr %a) nounwind {
 ; CHECK: Cluster ld/st SU(1) - SU(3)
 ; CHECK: SU(1):   %{{[0-9]+}}:fpr128 = LDRQui
 ; CHECK: SU(3):   %{{[0-9]+}}:fpr128 = LDRQui
-define <2 x i64> @ldq_cluster(ptr %p) {
-  %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+define <4 x i32> @ldq_cluster(ptr %p) {
+  %tmp1 = load <4 x i32>, ptr %p, align 8
   %add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
-  %tmp2 = add nsw <2 x i64> %tmp1, %tmp1
-  %tmp3 = load <2 x i64>, ptr %add.ptr2, align 8
-  %res  = mul nsw <2 x i64> %tmp2, %tmp3
-  ret <2 x i64> %res
+  %tmp2 = add nsw <4 x i32> %tmp1, %tmp1
+  %tmp3 = load <4 x i32>, ptr %add.ptr2, align 8
+  %res  = mul nsw <4 x i32> %tmp2, %tmp3
+  ret <4 x i32> %res
 }
 
 ; CHECK: ********** MI Scheduling **********
@@ -215,7 +215,7 @@ exit:
 ; CHECK: ********** MI Scheduling **********
 ; CHECK: LDURXi_LDRXui:%bb.0 entry
 ; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3):  %{{[0-9]+}}:gpr64 = LDURXi 
+; CHECK: SU(3):  %{{[0-9]+}}:gpr64 = LDURXi
 ; CHECK: SU(4):  %{{[0-9]+}}:gpr64 = LDRXui
 ;
 define void @LDURXi_LDRXui(ptr nocapture readonly %arg, ptr nocapture readonly %wa, ptr nocapture readonly %wb) {
diff --git a/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll b/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
index baca159f9dd55..02dfaa19acc9d 100644
--- a/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
@@ -4,11 +4,9 @@
 define void @autogen_SD19655(ptr %addr, ptr %addrfloat) {
 ; CHECK-LABEL: autogen_SD19655:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mov.d x8, v0[1]
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    scvtf s1, x9
-; CHECK-NEXT:    scvtf s0, x8
+; CHECK-NEXT:    ldp x8, x9, [x0]
+; CHECK-NEXT:    scvtf s0, x9
+; CHECK-NEXT:    scvtf s1, x8
 ; CHECK-NEXT:    mov.s v1[1], v0[0]
 ; CHECK-NEXT:    str d1, [x1]
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
index 6ab703c08b837..121cc30692124 100644
--- a/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/extract-vector-elt.ll
@@ -1114,16 +1114,10 @@ entry:
 }
 
 define ptr @v3ext(<3 x ptr> %a, <3 x ptr> %b, <3 x ptr> %x) {
-; CHECK-SD-LABEL: v3ext:
-; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    ldr d0, [sp]
-; CHECK-SD-NEXT:    fmov x0, d0
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: v3ext:
-; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldr x0, [sp]
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: v3ext:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x0, [sp]
+; CHECK-NEXT:    ret
 entry:
   %c = extractelement <3 x ptr> %x, i32 2
   ret ptr %c
diff --git a/llvm/test/CodeGen/AArch64/itofp-bf16.ll b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
index 42641693c4081..0d3ae559449a4 100644
--- a/llvm/test/CodeGen/AArch64/itofp-bf16.ll
+++ b/llvm/test/CodeGen/AArch64/itofp-bf16.ll
@@ -740,162 +740,151 @@ entry:
 define <32 x bfloat> @stofp_v32i64_v32bf16(<32 x i64> %a) {
 ; CHECK-LABEL: stofp_v32i64_v32bf16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    mov x9, v3.d[1]
-; CHECK-NEXT:    mov x8, v2.d[1]
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    movi v3.4s, #1
-; CHECK-NEXT:    scvtf s2, x10
-; CHECK-NEXT:    mov x10, v0.d[1]
-; CHECK-NEXT:    scvtf s19, x9
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    scvtf s16, x11
-; CHECK-NEXT:    mov x11, v6.d[1]
-; CHECK-NEXT:    scvtf s0, x12
-; CHECK-NEXT:    scvtf s18, x8
-; CHECK-NEXT:    mov x8, v1.d[1]
+; CHECK-NEXT:    ldp x8, x9, [sp, #32]
+; CHECK-NEXT:    mov x13, v2.d[1]
+; CHECK-NEXT:    ldp x10, x12, [sp, #96]
+; CHECK-NEXT:    fmov x14, d3
+; CHECK-NEXT:    movi v17.4s, #1
+; CHECK-NEXT:    scvtf s18, x9
+; CHECK-NEXT:    scvtf s16, x8
+; CHECK-NEXT:    ldp x8, x9, [sp, #48]
+; CHECK-NEXT:    scvtf s23, x12
 ; CHECK-NEXT:    scvtf s20, x10
-; CHECK-NEXT:    scvtf s17, x9
-; CHECK-NEXT:    mov x9, v7.d[1]
-; CHECK-NEXT:    mov x10, v4.d[1]
-; CHECK-NEXT:    scvtf s21, x11
-; CHECK-NEXT:    fmov x11, d6
-; CHECK-NEXT:    mov v2.s[1], v18.s[0]
-; CHECK-NEXT:    scvtf s25, x8
-; CHECK-NEXT:    movi v6.4s, #127, msl #8
-; CHECK-NEXT:    mov v0.s[1], v20.s[0]
-; CHECK-NEXT:    ldp q24, q20, [sp, #32]
-; CHECK-NEXT:    scvtf s22, x9
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    scvtf s1, x11
-; CHECK-NEXT:    scvtf s26, x10
-; CHECK-NEXT:    fmov x11, d7
-; CHECK-NEXT:    mov v2.s[2], v16.s[0]
-; CHECK-NEXT:    ldp q18, q16, [sp]
-; CHECK-NEXT:    mov x8, v24.d[1]
-; CHECK-NEXT:    scvtf s4, x9
-; CHECK-NEXT:    fmov x9, d5
-; CHECK-NEXT:    mov v0.s[2], v17.s[0]
-; CHECK-NEXT:    mov v1.s[1], v21.s[0]
-; CHECK-NEXT:    scvtf s23, x11
-; CHECK-NEXT:    mov x11, v5.d[1]
-; CHECK-NEXT:    mov v2.s[3], v19.s[0]
+; CHECK-NEXT:    mov x10, v0.d[1]
+; CHECK-NEXT:    scvtf s27, x13
 ; CHECK-NEXT:    scvtf s21, x8
-; CHECK-NEXT:    mov x8, v20.d[1]
-; CHECK-NEXT:    scvtf s17, x9
-; CHECK-NEXT:    fmov x9, d24
-; CHECK-NEXT:    mov v4.s[1], v26.s[0]
-; CHECK-NEXT:    mov v0.s[3], v25.s[0]
-; CHECK-NEXT:    ldp q26, q24, [sp, #96]
-; CHECK-NEXT:    mov v1.s[2], v23.s[0]
-; CHECK-NEXT:    ldp q25, q23, [sp, #64]
-; CHECK-NEXT:    scvtf s7, x11
-; CHECK-NEXT:    scvtf s27, x8
-; CHECK-NEXT:    fmov x8, d18
-; CHECK-NEXT:    scvtf s5, x9
-; CHECK-NEXT:    mov x10, v26.d[1]
-; CHECK-NEXT:    mov x9, v18.d[1]
-; CHECK-NEXT:    fmov x11, d20
-; CHECK-NEXT:    mov v4.s[2], v17.s[0]
-; CHECK-NEXT:    mov v1.s[3], v22.s[0]
-; CHECK-NEXT:    ushr v19.4s, v2.4s, #16
-; CHECK-NEXT:    scvtf s17, x8
-; CHECK-NEXT:    fmov x8, d26
-; CHECK-NEXT:    add v26.4s, v2.4s, v6.4s
+; CHECK-NEXT:    ldp x8, x11, [sp]
+; CHECK-NEXT:    mov v16.s[1], v18.s[0]
+; CHECK-NEXT:    scvtf s24, x9
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    mov v20.s[1], v23.s[0]
 ; CHECK-NEXT:    scvtf s22, x11
-; CHECK-NEXT:    mov x11, v25.d[1]
-; CHECK-NEXT:    mov v5.s[1], v21.s[0]
-; CHECK-NEXT:    scvtf s28, x10
-; CHECK-NEXT:    fmov x10, d16
-; CHECK-NEXT:    scvtf s21, x9
-; CHECK-NEXT:    fmov x9, d25
-; CHECK-NEXT:    scvtf s18, x8
-; CHECK-NEXT:    mov x8, v16.d[1]
-; CHECK-NEXT:    mov v4.s[3], v7.s[0]
-; CHECK-NEXT:    and v19.16b, v19.16b, v3.16b
-; CHECK-NEXT:    scvtf s16, x10
-; CHECK-NEXT:    fmov x10, d24
+; CHECK-NEXT:    ldp x11, x12, [sp, #16]
+; CHECK-NEXT:    scvtf s19, x8
+; CHECK-NEXT:    mov x8, v3.d[1]
+; CHECK-NEXT:    mov v16.s[2], v21.s[0]
 ; CHECK-NEXT:    scvtf s25, x11
-; CHECK-NEXT:    scvtf s20, x9
-; CHECK-NEXT:    mov x9, v24.d[1]
-; CHECK-NEXT:    mov v17.s[1], v21.s[0]
-; CHECK-NEXT:    fmov x11, d23
-; CHECK-NEXT:    mov v18.s[1], v28.s[0]
-; CHECK-NEXT:    scvtf s24, x8
-; CHECK-NEXT:    scvtf s21, x10
-; CHECK-NEXT:    mov x10, v23.d[1]
-; CHECK-NEXT:    mov v5.s[2], v22.s[0]
-; CHECK-NEXT:    ushr v22.4s, v1.4s, #16
-; CHECK-NEXT:    ushr v28.4s, v0.4s, #16
+; CHECK-NEXT:    ldp x9, x11, [sp, #112]
+; CHECK-NEXT:    mov v19.s[1], v22.s[0]
+; CHECK-NEXT:    scvtf s22, x12
+; CHECK-NEXT:    scvtf s26, x9
+; CHECK-NEXT:    ldp x9, x12, [sp, #64]
 ; CHECK-NEXT:    scvtf s23, x11
-; CHECK-NEXT:    mov v20.s[1], v25.s[0]
-; CHECK-NEXT:    scvtf s25, x9
-; CHECK-NEXT:    mov v17.s[2], v16.s[0]
-; CHECK-NEXT:    add v16.4s, v19.4s, v26.4s
-; CHECK-NEXT:    ushr v26.4s, v4.4s, #16
-; CHECK-NEXT:    mov v18.s[2], v21.s[0]
-; CHECK-NEXT:    scvtf s7, x10
-; CHECK-NEXT:    and v22.16b, v22.16b, v3.16b
-; CHECK-NEXT:    mov v5.s[3], v27.s[0]
-; CHECK-NEXT:    and v21.16b, v28.16b, v3.16b
-; CHECK-NEXT:    fcmeq v19.4s, v2.4s, v2.4s
-; CHECK-NEXT:    mov v20.s[2], v23.s[0]
-; CHECK-NEXT:    add v23.4s, v0.4s, v6.4s
+; CHECK-NEXT:    mov v16.s[3], v24.s[0]
+; CHECK-NEXT:    fmov x11, d2
+; CHECK-NEXT:    scvtf s24, x12
+; CHECK-NEXT:    scvtf s2, x9
+; CHECK-NEXT:    mov x9, v6.d[1]
+; CHECK-NEXT:    ldp x12, x13, [sp, #80]
+; CHECK-NEXT:    scvtf s21, x11
+; CHECK-NEXT:    mov x11, v4.d[1]
+; CHECK-NEXT:    mov v19.s[2], v25.s[0]
+; CHECK-NEXT:    mov v20.s[2], v26.s[0]
+; CHECK-NEXT:    ushr v25.4s, v16.4s, #16
+; CHECK-NEXT:    scvtf s26, x14
+; CHECK-NEXT:    scvtf s3, x12
+; CHECK-NEXT:    mov v2.s[1], v24.s[0]
+; CHECK-NEXT:    scvtf s24, x10
+; CHECK-NEXT:    fmov x10, d6
+; CHECK-NEXT:    fmov x12, d0
+; CHECK-NEXT:    scvtf s6, x9
+; CHECK-NEXT:    mov v21.s[1], v27.s[0]
+; CHECK-NEXT:    scvtf s27, x11
+; CHECK-NEXT:    fmov x11, d7
+; CHECK-NEXT:    mov v19.s[3], v22.s[0]
+; CHECK-NEXT:    mov v20.s[3], v23.s[0]
+; CHECK-NEXT:    add v22.4s, v16.4s, v18.4s
+; CHECK-NEXT:    mov v2.s[2], v3.s[0]
+; CHECK-NEXT:    scvtf s3, x10
+; CHECK-NEXT:    fmov x10, d4
+; CHECK-NEXT:    scvtf s0, x12
+; CHECK-NEXT:    and v23.16b, v25.16b, v17.16b
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x12, d5
+; CHECK-NEXT:    mov v21.s[2], v26.s[0]
+; CHECK-NEXT:    scvtf s25, x13
+; CHECK-NEXT:    scvtf s4, x10
+; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    add v26.4s, v20.4s, v18.4s
+; CHECK-NEXT:    mov v3.s[1], v6.s[0]
+; CHECK-NEXT:    scvtf s6, x11
+; CHECK-NEXT:    mov x11, v5.d[1]
+; CHECK-NEXT:    scvtf s5, x8
+; CHECK-NEXT:    mov v0.s[1], v24.s[0]
+; CHECK-NEXT:    add v22.4s, v23.4s, v22.4s
+; CHECK-NEXT:    scvtf s1, x10
+; CHECK-NEXT:    mov x10, v7.d[1]
+; CHECK-NEXT:    scvtf s7, x12
+; CHECK-NEXT:    mov v4.s[1], v27.s[0]
+; CHECK-NEXT:    ushr v23.4s, v19.4s, #16
+; CHECK-NEXT:    mov v2.s[3], v25.s[0]
+; CHECK-NEXT:    mov v3.s[2], v6.s[0]
+; CHECK-NEXT:    add v25.4s, v19.4s, v18.4s
+; CHECK-NEXT:    ushr v24.4s, v20.4s, #16
+; CHECK-NEXT:    mov v21.s[3], v5.s[0]
+; CHECK-NEXT:    scvtf s5, x11
+; CHECK-NEXT:    fcmeq v29.4s, v20.4s, v20.4s
+; CHECK-NEXT:    scvtf s6, x10
+; CHECK-NEXT:    and v23.16b, v23.16b, v17.16b
+; CHECK-NEXT:    mov v0.s[2], v1.s[0]
+; CHECK-NEXT:    scvtf s1, x9
+; CHECK-NEXT:    mov v4.s[2], v7.s[0]
+; CHECK-NEXT:    and v24.16b, v24.16b, v17.16b
+; CHECK-NEXT:    fcmeq v7.4s, v16.4s, v16.4s
+; CHECK-NEXT:    orr v16.4s, #64, lsl #16
+; CHECK-NEXT:    fcmeq v31.4s, v2.4s, v2.4s
+; CHECK-NEXT:    add v27.4s, v21.4s, v18.4s
+; CHECK-NEXT:    orr v20.4s, #64, lsl #16
+; CHECK-NEXT:    mov v3.s[3], v6.s[0]
+; CHECK-NEXT:    add v6.4s, v23.4s, v25.4s
+; CHECK-NEXT:    ushr v23.4s, v21.4s, #16
+; CHECK-NEXT:    mov v0.s[3], v1.s[0]
+; CHECK-NEXT:    mov v4.s[3], v5.s[0]
+; CHECK-NEXT:    ushr v1.4s, v2.4s, #16
+; CHECK-NEXT:    add v24.4s, v24.4s, v26.4s
+; CHECK-NEXT:    add v25.4s, v2.4s, v18.4s
+; CHECK-NEXT:    fcmeq v5.4s, v19.4s, v19.4s
+; CHECK-NEXT:    and v23.16b, v23.16b, v17.16b
+; CHECK-NEXT:    orr v19.4s, #64, lsl #16
 ; CHECK-NEXT:    orr v2.4s, #64, lsl #16
-; CHECK-NEXT:    mov v17.s[3], v24.s[0]
-; CHECK-NEXT:    add v24.4s, v1.4s, v6.4s
-; CHECK-NEXT:    fcmeq v27.4s, v1.4s, v1.4s
-; CHECK-NEXT:    mov v18.s[3], v25.s[0]
-; CHECK-NEXT:    add v25.4s, v4.4s, v6.4s
-; CHECK-NEXT:    orr v1.4s, #64, lsl #16
-; CHECK-NEXT:    bit v2.16b, v16.16b, v19.16b
-; CHECK-NEXT:    mov v20.s[3], v7.s[0]
-; CHECK-NEXT:    add v22.4s, v22.4s, v24.4s
-; CHECK-NEXT:    add v7.4s, v21.4s, v23.4s
-; CHECK-NEXT:    ushr v24.4s, v17.4s, #16
-; CHECK-NEXT:    and v23.16b, v26.16b, v3.16b
-; CHECK-NEXT:    ushr v26.4s, v5.4s, #16
-; CHECK-NEXT:    ushr v28.4s, v18.4s, #16
-; CHECK-NEXT:    add v30.4s, v17.4s, v6.4s
-; CHECK-NEXT:    add v31.4s, v18.4s, v6.4s
-; CHECK-NEXT:    fcmeq v21.4s, v0.4s, v0.4s
-; CHECK-NEXT:    orr v0.4s, #64, lsl #16
-; CHECK-NEXT:    bit v1.16b, v22.16b, v27.16b
-; CHECK-NEXT:    ushr v29.4s, v20.4s, #16
-; CHECK-NEXT:    and v24.16b, v24.16b, v3.16b
-; CHECK-NEXT:    add v23.4s, v23.4s, v25.4s
-; CHECK-NEXT:    and v28.16b, v28.16b, v3.16b
-; CHECK-NEXT:    and v25.16b, v26.16b, v3.16b
-; CHECK-NEXT:    add v26.4s, v5.4s, v6.4s
-; CHECK-NEXT:    add v6.4s, v20.4s, v6.4s
-; CHECK-NEXT:    and v3.16b, v29.16b, v3.16b
-; CHECK-NEXT:    add v24.4s, v24.4s, v30.4s
-; CHECK-NEXT:    fcmeq v30.4s, v17.4s, v17.4s
-; CHECK-NEXT:    add v28.4s, v28.4s, v31.4s
-; CHECK-NEXT:    fcmeq v31.4s, v18.4s, v18.4s
-; CHECK-NEXT:    fcmeq v29.4s, v4.4s, v4.4s
+; CHECK-NEXT:    ushr v28.4s, v3.4s, #16
+; CHECK-NEXT:    and v1.16b, v1.16b, v17.16b
+; CHECK-NEXT:    bsl v7.16b, v22.16b, v16.16b
+; CHECK-NEXT:    ushr v26.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v30.4s, v4.4s, #16
+; CHECK-NEXT:    add v23.4s, v23.4s, v27.4s
+; CHECK-NEXT:    bsl v5.16b, v6.16b, v19.16b
+; CHECK-NEXT:    mov v6.16b, v29.16b
+; CHECK-NEXT:    and v27.16b, v28.16b, v17.16b
+; CHECK-NEXT:    add v28.4s, v3.4s, v18.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v25.4s
+; CHECK-NEXT:    and v25.16b, v26.16b, v17.16b
+; CHECK-NEXT:    add v26.4s, v0.4s, v18.4s
+; CHECK-NEXT:    and v17.16b, v30.16b, v17.16b
+; CHECK-NEXT:    add v18.4s, v4.4s, v18.4s
+; CHECK-NEXT:    fcmeq v30.4s, v21.4s, v21.4s
+; CHECK-NEXT:    orr v21.4s, #64, lsl #16
+; CHECK-NEXT:    add v27.4s, v27.4s, v28.4s
+; CHECK-NEXT:    fcmeq v28.4s, v3.4s, v3.4s
+; CHECK-NEXT:    orr v3.4s, #64, lsl #16
 ; CHECK-NEXT:    add v25.4s, v25.4s, v26.4s
-; CHECK-NEXT:    fcmeq v26.4s, v5.4s, v5.4s
+; CHECK-NEXT:    fcmeq v26.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    add v17.4s, v17.4s, v18.4s
+; CHECK-NEXT:    fcmeq v18.4s, v4.4s, v4.4s
 ; CHECK-NEXT:    orr v4.4s, #64, lsl #16
-; CHECK-NEXT:    add v3.4s, v3.4s, v6.4s
-; CHECK-NEXT:    fcmeq v6.4s, v20.4s, v20.4s
-; CHECK-NEXT:    orr v5.4s, #64, lsl #16
-; CHECK-NEXT:    orr v17.4s, #64, lsl #16
-; CHECK-NEXT:    orr v18.4s, #64, lsl #16
-; CHECK-NEXT:    orr v20.4s, #64, lsl #16
-; CHECK-NEXT:    bit v0.16b, v7.16b, v21.16b
-; CHECK-NEXT:    mov v7.16b, v30.16b
-; CHECK-NEXT:    mov v16.16b, v31.16b
-; CHECK-NEXT:    bit v4.16b, v23.16b, v29.16b
-; CHECK-NEXT:    bit v5.16b, v25.16b, v26.16b
-; CHECK-NEXT:    bif v3.16b, v20.16b, v6.16b
-; CHECK-NEXT:    bsl v7.16b, v24.16b, v17.16b
-; CHECK-NEXT:    bsl v16.16b, v28.16b, v18.16b
-; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    uzp2 v1.8h, v4.8h, v1.8h
-; CHECK-NEXT:    uzp2 v2.8h, v7.8h, v5.8h
-; CHECK-NEXT:    uzp2 v3.8h, v3.8h, v16.8h
+; CHECK-NEXT:    mov v16.16b, v30.16b
+; CHECK-NEXT:    bsl v6.16b, v24.16b, v20.16b
+; CHECK-NEXT:    bif v1.16b, v2.16b, v31.16b
+; CHECK-NEXT:    mov v19.16b, v28.16b
+; CHECK-NEXT:    uzp2 v2.8h, v5.8h, v7.8h
+; CHECK-NEXT:    bit v0.16b, v25.16b, v26.16b
+; CHECK-NEXT:    bsl v16.16b, v23.16b, v21.16b
+; CHECK-NEXT:    bit v4.16b, v17.16b, v18.16b
+; CHECK-NEXT:    bsl v19.16b, v27.16b, v3.16b
+; CHECK-NEXT:    uzp2 v3.8h, v1.8h, v6.8h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v16.8h
+; CHECK-NEXT:    uzp2 v1.8h, v4.8h, v19.8h
 ; CHECK-NEXT:    ret
 entry:
   %c = sitofp <32 x i64> %a to <32 x bfloat>
@@ -905,162 +894,151 @@ entry:
 define <32 x bfloat> @utofp_v32i64_v32bf16(<32 x i64> %a) {
 ; CHECK-LABEL: utofp_v32i64_v32bf16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov x10, d2
-; CHECK-NEXT:    mov x9, v3.d[1]
-; CHECK-NEXT:    mov x8, v2.d[1]
-; CHECK-NEXT:    fmov x11, d3
-; CHECK-NEXT:    fmov x12, d0
-; CHECK-NEXT:    movi v3.4s, #1
-; CHECK-NEXT:    ucvtf s2, x10
-; CHECK-NEXT:    mov x10, v0.d[1]
-; CHECK-NEXT:    ucvtf s19, x9
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    ucvtf s16, x11
-; CHECK-NEXT:    mov x11, v6.d[1]
-; CHECK-NEXT:    ucvtf s0, x12
-; CHECK-NEXT:    ucvtf s18, x8
-; CHECK-NEXT:    mov x8, v1.d[1]
+; CHECK-NEXT:    ldp x8, x9, [sp, #32]
+; CHECK-NEXT:    mov x13, v2.d[1]
+; CHECK-NEXT:    ldp x10, x12, [sp, #96]
+; CHECK-NEXT:    fmov x14, d3
+; CHECK-NEXT:    movi v17.4s, #1
+; CHECK-NEXT:    ucvtf s18, x9
+; CHECK-NEXT:    ucvtf s16, x8
+; CHECK-NEXT:    ldp x8, x9, [sp, #48]
+; CHECK-NEXT:    ucvtf s23, x12
 ; CHECK-NEXT:    ucvtf s20, x10
-; CHECK-NEXT:    ucvtf s17, x9
-; CHECK-NEXT:    mov x9, v7.d[1]
-; CHECK-NEXT:    mov x10, v4.d[1]
-; CHECK-NEXT:    ucvtf s21, x11
-; CHECK-NEXT:    fmov x11, d6
-; CHECK-NEXT:    mov v2.s[1], v18.s[0]
-; CHECK-NEXT:    ucvtf s25, x8
-; CHECK-NEXT:    movi v6.4s, #127, msl #8
-; CHECK-NEXT:    mov v0.s[1], v20.s[0]
-; CHECK-NEXT:    ldp q24, q20, [sp, #32]
-; CHECK-NEXT:    ucvtf s22, x9
-; CHECK-NEXT:    fmov x9, d4
-; CHECK-NEXT:    ucvtf s1, x11
-; CHECK-NEXT:    ucvtf s26, x10
-; CHECK-NEXT:    fmov x11, d7
-; CHECK-NEXT:    mov v2.s[2], v16.s[0]
-; CHECK-NEXT:    ldp q18, q16, [sp]
-; CHECK-NEXT:    mov x8, v24.d[1]
-; CHECK-NEXT:    ucvtf s4, x9
-; CHECK-NEXT:    fmov x9, d5
-; CHECK-NEXT:    mov v0.s[2], v17.s[0]
-; CHECK-NEXT:    mov v1.s[1], v21.s[0]
-; CHECK-NEXT:    ucvtf s23, x11
-; CHECK-NEXT:    mov x11, v5.d[1]
-; CHECK-NEXT:    mov v2.s[3], v19.s[0]
+; CHECK-NEXT:    mov x10, v0.d[1]
+; CHECK-NEXT:    ucvtf s27, x13
 ; CHECK-NEXT:    ucvtf s21, x8
-; CHECK-NEXT:    mov x8, v20.d[1]
-; CHECK-NEXT:    ucvtf s17, x9
-; CHECK-NEXT:    fmov x9, d24
-; CHECK-NEXT:    mov v4.s[1], v26.s[0]
-; CHECK-NEXT:    mov v0.s[3], v25.s[0]
-; CHECK-NEXT:    ldp q26, q24, [sp, #96]
-; CHECK-NEXT:    mov v1.s[2], v23.s[0]
-; CHECK-NEXT:    ldp q25, q23, [sp, #64]
-; CHECK-NEXT:    ucvtf s7, x11
-; CHECK-NEXT:    ucvtf s27, x8
-; CHECK-NEXT:    fmov x8, d18
-; CHECK-NEXT:    ucvtf s5, x9
-; CHECK-NEXT:    mov x10, v26.d[1]
-; CHECK-NEXT:    mov x9, v18.d[1]
-; CHECK-NEXT:    fmov x11, d20
-; CHECK-NEXT:    mov v4.s[2], v17.s[0]
-; CHECK-NEXT:    mov v1.s[3], v22.s[0]
-; CHECK-NEXT:    ushr v19.4s, v2.4s, #16
-; CHECK-NEXT:    ucvtf s17, x8
-; CHECK-NEXT:    fmov x8, d26
-; CHECK-NEXT:    add v26.4s, v2.4s, v6.4s
+; CHECK-NEXT:    ldp x8, x11, [sp]
+; CHECK-NEXT:    mov v16.s[1], v18.s[0]
+; CHECK-NEXT:    ucvtf s24, x9
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    mov v20.s[1], v23.s[0]
 ; CHECK-NEXT:    ucvtf s22, x11
-; CHECK-NEXT:    mov x11, v25.d[1]
-; CHECK-NEXT:    mov v5.s[1], v21.s[0]
-; CHECK-NEXT:    ucvtf s28, x10
-; CHECK-NEXT:    fmov x10, d16
-; CHECK-NEXT:    ucvtf s21, x9
-; CHECK-NEXT:    fmov x9, d25
-; CHECK-NEXT:    ucvtf s18, x8
-; CHECK-NEXT:    mov x8, v16.d[1]
-; CHECK-NEXT:    mov v4.s[3], v7.s[0]
-; CHECK-NEXT:    and v19.16b, v19.16b, v3.16b
-; CHECK-NEXT:    ucvtf s16, x10
-; CHECK-NEXT:    fmov x10, d24
+; CHECK-NEXT:    ldp x11, x12, [sp, #16]
+; CHECK-NEXT:    ucvtf s19, x8
+; CHECK-NEXT:    mov x8, v3.d[1]
+; CHECK-NEXT:    mov v16.s[2], v21.s[0]
 ; CHECK-NEXT:    ucvtf s25, x11
-; CHECK-NEXT:    ucvtf s20, x9
-; CHECK-NEXT:    mov x9, v24.d[1]
-; CHECK-NEXT:    mov v17.s[1], v21.s[0]
-; CHECK-NEXT:    fmov x11, d23
-; CHECK-NEXT:    mov v18.s[1], v28.s[0]
-; CHECK-NEXT:    ucvtf s24, x8
-; CHECK-NEXT:    ucvtf s21, x10
-; CHECK-NEXT:    mov x10, v23.d[1]
-; CHECK-NEXT:    mov v5.s[2], v22.s[0]
-; CHECK-NEXT:    ushr v22.4s, v1.4s, #16
-; CHECK-NEXT:    ushr v28.4s, v0.4s, #16
+; CHECK-NEXT:    ldp x9, x11, [sp, #112]
+; CHECK-NEXT:    mov v19.s[1], v22.s[0]
+; CHECK-NEXT:    ucvtf s22, x12
+; CHECK-NEXT:    ucvtf s26, x9
+; CHECK-NEXT:    ldp x9, x12, [sp, #64]
 ; CHECK-NEXT:    ucvtf s23, x11
-; CHECK-NEXT:    mov v20.s[1], v25.s[0]
-; CHECK-NEXT:    ucvtf s25, x9
-; CHECK-NEXT:    mov v17.s[2], v16.s[0]
-; CHECK-NEXT:    add v16.4s, v19.4s, v26.4s
-; CHECK-NEXT:    ushr v26.4s, v4.4s, #16
-; CHECK-NEXT:    mov v18.s[2], v21.s[0]
-; CHECK-NEXT:    ucvtf s7, x10
-; CHECK-NEXT:    and v22.16b, v22.16b, v3.16b
-; CHECK-NEXT:    mov v5.s[3], v27.s[0]
-; CHECK-NEXT:    and v21.16b, v28.16b, v3.16b
-; CHECK-NEXT:    fcmeq v19.4s, v2.4s, v2.4s
-; CHECK-NEXT:    mov v20.s[2], v23.s[0]
-; CHECK-NEXT:    add v23.4s, v0.4s, v6.4s
+; CHECK-NEXT:    mov v16.s[3], v24.s[0]
+; CHECK-NEXT:    fmov x11, d2
+; CHECK-NEXT:    ucvtf s24, x12
+; CHECK-NEXT:    ucvtf s2, x9
+; CHECK-NEXT:    mov x9, v6.d[1]
+; CHECK-NEXT:    ldp x12, x13, [sp, #80]
+; CHECK-NEXT:    ucvtf s21, x11
+; CHECK-NEXT:    mov x11, v4.d[1]
+; CHECK-NEXT:    mov v19.s[2], v25.s[0]
+; CHECK-NEXT:    mov v20.s[2], v26.s[0]
+; CHECK-NEXT:    ushr v25.4s, v16.4s, #16
+; CHECK-NEXT:    ucvtf s26, x14
+; CHECK-NEXT:    ucvtf s3, x12
+; CHECK-NEXT:    mov v2.s[1], v24.s[0]
+; CHECK-NEXT:    ucvtf s24, x10
+; CHECK-NEXT:    fmov x10, d6
+; CHECK-NEXT:    fmov x12, d0
+; CHECK-NEXT:    ucvtf s6, x9
+; CHECK-NEXT:    mov v21.s[1], v27.s[0]
+; CHECK-NEXT:    ucvtf s27, x11
+; CHECK-NEXT:    fmov x11, d7
+; CHECK-NEXT:    mov v19.s[3], v22.s[0]
+; CHECK-NEXT:    mov v20.s[3], v23.s[0]
+; CHECK-NEXT:    add v22.4s, v16.4s, v18.4s
+; CHECK-NEXT:    mov v2.s[2], v3.s[0]
+; CHECK-NEXT:    ucvtf s3, x10
+; CHECK-NEXT:    fmov x10, d4
+; CHECK-NEXT:    ucvtf s0, x12
+; CHECK-NEXT:    and v23.16b, v25.16b, v17.16b
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x12, d5
+; CHECK-NEXT:    mov v21.s[2], v26.s[0]
+; CHECK-NEXT:    ucvtf s25, x13
+; CHECK-NEXT:    ucvtf s4, x10
+; CHECK-NEXT:    fmov x10, d1
+; CHECK-NEXT:    add v26.4s, v20.4s, v18.4s
+; CHECK-NEXT:    mov v3.s[1], v6.s[0]
+; CHECK-NEXT:    ucvtf s6, x11
+; CHECK-NEXT:    mov x11, v5.d[1]
+; CHECK-NEXT:    ucvtf s5, x8
+; CHECK-NEXT:    mov v0.s[1], v24.s[0]
+; CHECK-NEXT:    add v22.4s, v23.4s, v22.4s
+; CHECK-NEXT:    ucvtf s1, x10
+; CHECK-NEXT:    mov x10, v7.d[1]
+; CHECK-NEXT:    ucvtf s7, x12
+; CHECK-NEXT:    mov v4.s[1], v27.s[0]
+; CHECK-NEXT:    ushr v23.4s, v19.4s, #16
+; CHECK-NEXT:    mov v2.s[3], v25.s[0]
+; CHECK-NEXT:    mov v3.s[2], v6.s[0]
+; CHECK-NEXT:    add v25.4s, v19.4s, v18.4s
+; CHECK-NEXT:    ushr v24.4s, v20.4s, #16
+; CHECK-NEXT:    mov v21.s[3], v5.s[0]
+; CHECK-NEXT:    ucvtf s5, x11
+; CHECK-NEXT:    fcmeq v29.4s, v20.4s, v20.4s
+; CHECK-NEXT:    ucvtf s6, x10
+; CHECK-NEXT:    and v23.16b, v23.16b, v17.16b
+; CHECK-NEXT:    mov v0.s[2], v1.s[0]
+; CHECK-NEXT:    ucvtf s1, x9
+; CHECK-NEXT:    mov v4.s[2], v7.s[0]
+; CHECK-NEXT:    and v24.16b, v24.16b, v17.16b
+; CHECK-NEXT:    fcmeq v7.4s, v16.4s, v16.4s
+; CHECK-NEXT:    orr v16.4s, #64, lsl #16
+; CHECK-NEXT:    fcmeq v31.4s, v2.4s, v2.4s
+; CHECK-NEXT:    add v27.4s, v21.4s, v18.4s
+; CHECK-NEXT:    orr v20.4s, #64, lsl #16
+; CHECK-NEXT:    mov v3.s[3], v6.s[0]
+; CHECK-NEXT:    add v6.4s, v23.4s, v25.4s
+; CHECK-NEXT:    ushr v23.4s, v21.4s, #16
+; CHECK-NEXT:    mov v0.s[3], v1.s[0]
+; CHECK-NEXT:    mov v4.s[3], v5.s[0]
+; CHECK-NEXT:    ushr v1.4s, v2.4s, #16
+; CHECK-NEXT:    add v24.4s, v24.4s, v26.4s
+; CHECK-NEXT:    add v25.4s, v2.4s, v18.4s
+; CHECK-NEXT:    fcmeq v5.4s, v19.4s, v19.4s
+; CHECK-NEXT:    and v23.16b, v23.16b, v17.16b
+; CHECK-NEXT:    orr v19.4s, #64, lsl #16
 ; CHECK-NEXT:    orr v2.4s, #64, lsl #16
-; CHECK-NEXT:    mov v17.s[3], v24.s[0]
-; CHECK-NEXT:    add v24.4s, v1.4s, v6.4s
-; CHECK-NEXT:    fcmeq v27.4s, v1.4s, v1.4s
-; CHECK-NEXT:    mov v18.s[3], v25.s[0]
-; CHECK-NEXT:    add v25.4s, v4.4s, v6.4s
-; CHECK-NEXT:    orr v1.4s, #64, lsl #16
-; CHECK-NEXT:    bit v2.16b, v16.16b, v19.16b
-; CHECK-NEXT:    mov v20.s[3], v7.s[0]
-; CHECK-NEXT:    add v22.4s, v22.4s, v24.4s
-; CHECK-NEXT:    add v7.4s, v21.4s, v23.4s
-; CHECK-NEXT:    ushr v24.4s, v17.4s, #16
-; CHECK-NEXT:    and v23.16b, v26.16b, v3.16b
-; CHECK-NEXT:    ushr v26.4s, v5.4s, #16
-; CHECK-NEXT:    ushr v28.4s, v18.4s, #16
-; CHECK-NEXT:    add v30.4s, v17.4s, v6.4s
-; CHECK-NEXT:    add v31.4s, v18.4s, v6.4s
-; CHECK-NEXT:    fcmeq v21.4s, v0.4s, v0.4s
-; CHECK-NEXT:    orr v0.4s, #64, lsl #16
-; CHECK-NEXT:    bit v1.16b, v22.16b, v27.16b
-; CHECK-NEXT:    ushr v29.4s, v20.4s, #16
-; CHECK-NEXT:    and v24.16b, v24.16b, v3.16b
-; CHECK-NEXT:    add v23.4s, v23.4s, v25.4s
-; CHECK-NEXT:    and v28.16b, v28.16b, v3.16b
-; CHECK-NEXT:    and v25.16b, v26.16b, v3.16b
-; CHECK-NEXT:    add v26.4s, v5.4s, v6.4s
-; CHECK-NEXT:    add v6.4s, v20.4s, v6.4s
-; CHECK-NEXT:    and v3.16b, v29.16b, v3.16b
-; CHECK-NEXT:    add v24.4s, v24.4s, v30.4s
-; CHECK-NEXT:    fcmeq v30.4s, v17.4s, v17.4s
-; CHECK-NEXT:    add v28.4s, v28.4s, v31.4s
-; CHECK-NEXT:    fcmeq v31.4s, v18.4s, v18.4s
-; CHECK-NEXT:    fcmeq v29.4s, v4.4s, v4.4s
+; CHECK-NEXT:    ushr v28.4s, v3.4s, #16
+; CHECK-NEXT:    and v1.16b, v1.16b, v17.16b
+; CHECK-NEXT:    bsl v7.16b, v22.16b, v16.16b
+; CHECK-NEXT:    ushr v26.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v30.4s, v4.4s, #16
+; CHECK-NEXT:    add v23.4s, v23.4s, v27.4s
+; CHECK-NEXT:    bsl v5.16b, v6.16b, v19.16b
+; CHECK-NEXT:    mov v6.16b, v29.16b
+; CHECK-NEXT:    and v27.16b, v28.16b, v17.16b
+; CHECK-NEXT:    add v28.4s, v3.4s, v18.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v25.4s
+; CHECK-NEXT:    and v25.16b, v26.16b, v17.16b
+; CHECK-NEXT:    add v26.4s, v0.4s, v18.4s
+; CHECK-NEXT:    and v17.16b, v30.16b, v17.16b
+; CHECK-NEXT:    add v18.4s, v4.4s, v18.4s
+; CHECK-NEXT:    fcmeq v30.4s, v21.4s, v21.4s
+; CHECK-NEXT:    orr v21.4s, #64, lsl #16
+; CHECK-NEXT:    add v27.4s, v27.4s, v28.4s
+; CHECK-NEXT:    fcmeq v28.4s, v3.4s, v3.4s
+; CHECK-NEXT:    orr v3.4s, #64, lsl #16
 ; CHECK-NEXT:    add v25.4s, v25.4s, v26.4s
-; CHECK-NEXT:    fcmeq v26.4s, v5.4s, v5.4s
+; CHECK-NEXT:    fcmeq v26.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    add v17.4s, v17.4s, v18.4s
+; CHECK-NEXT:    fcmeq v18.4s, v4.4s, v4.4s
 ; CHECK-NEXT:    orr v4.4s, #64, lsl #16
-; CHECK-NEXT:    add v3.4s, v3.4s, v6.4s
-; CHECK-NEXT:    fcmeq v6.4s, v20.4s, v20.4s
-; CHECK-NEXT:    orr v5.4s, #64, lsl #16
-; CHECK-NEXT:    orr v17.4s, #64, lsl #16
-; CHECK-NEXT:    orr v18.4s, #64, lsl #16
-; CHECK-NEXT:    orr v20.4s, #64, lsl #16
-; CHECK-NEXT:    bit v0.16b, v7.16b, v21.16b
-; CHECK-NEXT:    mov v7.16b, v30.16b
-; CHECK-NEXT:    mov v16.16b, v31.16b
-; CHECK-NEXT:    bit v4.16b, v23.16b, v29.16b
-; CHECK-NEXT:    bit v5.16b, v25.16b, v26.16b
-; CHECK-NEXT:    bif v3.16b, v20.16b, v6.16b
-; CHECK-NEXT:    bsl v7.16b, v24.16b, v17.16b
-; CHECK-NEXT:    bsl v16.16b, v28.16b, v18.16b
-; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    uzp2 v1.8h, v4.8h, v1.8h
-; CHECK-NEXT:    uzp2 v2.8h, v7.8h, v5.8h
-; CHECK-NEXT:    uzp2 v3.8h, v3.8h, v16.8h
+; CHECK-NEXT:    mov v16.16b, v30.16b
+; CHECK-NEXT:    bsl v6.16b, v24.16b, v20.16b
+; CHECK-NEXT:    bif v1.16b, v2.16b, v31.16b
+; CHECK-NEXT:    mov v19.16b, v28.16b
+; CHECK-NEXT:    uzp2 v2.8h, v5.8h, v7.8h
+; CHECK-NEXT:    bit v0.16b, v25.16b, v26.16b
+; CHECK-NEXT:    bsl v16.16b, v23.16b, v21.16b
+; CHECK-NEXT:    bit v4.16b, v17.16b, v18.16b
+; CHECK-NEXT:    bsl v19.16b, v27.16b, v3.16b
+; CHECK-NEXT:    uzp2 v3.8h, v1.8h, v6.8h
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v16.8h
+; CHECK-NEXT:    uzp2 v1.8h, v4.8h, v19.8h
 ; CHECK-NEXT:    ret
 entry:
   %c = uitofp <32 x i64> %a to <32 x bfloat>
diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index c91de8f3a0a47..e3c623371448b 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -8,224 +8,209 @@
 define dso_local void @run_test() local_unnamed_addr uwtable {
 ; CHECK-LABEL: run_test:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #208
-; CHECK-NEXT:    .cfi_def_cfa_offset 208
-; CHECK-NEXT:    stp d15, d14, [sp, #96] // 16-byte Folded Spill
-; CHECK-NEXT:    stp d13, d12, [sp, #112] // 16-byte Folded Spill
-; CHECK-NEXT:    stp d11, d10, [sp, #128] // 16-byte Folded Spill
-; CHECK-NEXT:    stp d9, d8, [sp, #144] // 16-byte Folded Spill
-; CHECK-NEXT:    str x23, [sp, #160] // 8-byte Folded Spill
-; CHECK-NEXT:    stp x22, x21, [sp, #176] // 16-byte Folded Spill
-; CHECK-NEXT:    stp x20, x19, [sp, #192] // 16-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #192
+; CHECK-NEXT:    .cfi_def_cfa_offset 192
+; CHECK-NEXT:    stp d15, d14, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT:    stp d13, d12, [sp, #128] // 16-byte Folded Spill
+; CHECK-NEXT:    stp d11, d10, [sp, #144] // 16-byte Folded Spill
+; CHECK-NEXT:    stp d9, d8, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #176] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w20, -16
-; CHECK-NEXT:    .cfi_offset w21, -24
-; CHECK-NEXT:    .cfi_offset w22, -32
-; CHECK-NEXT:    .cfi_offset w23, -48
-; CHECK-NEXT:    .cfi_offset b8, -56
-; CHECK-NEXT:    .cfi_offset b9, -64
-; CHECK-NEXT:    .cfi_offset b10, -72
-; CHECK-NEXT:    .cfi_offset b11, -80
-; CHECK-NEXT:    .cfi_offset b12, -88
-; CHECK-NEXT:    .cfi_offset b13, -96
-; CHECK-NEXT:    .cfi_offset b14, -104
-; CHECK-NEXT:    .cfi_offset b15, -112
-; CHECK-NEXT:    movi v2.2d, #0000000000000000
-; CHECK-NEXT:    // implicit-def: $q1
+; CHECK-NEXT:    .cfi_offset b8, -24
+; CHECK-NEXT:    .cfi_offset b9, -32
+; CHECK-NEXT:    .cfi_offset b10, -40
+; CHECK-NEXT:    .cfi_offset b11, -48
+; CHECK-NEXT:    .cfi_offset b12, -56
+; CHECK-NEXT:    .cfi_offset b13, -64
+; CHECK-NEXT:    .cfi_offset b14, -72
+; CHECK-NEXT:    .cfi_offset b15, -80
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    adrp x14, B+48
+; CHECK-NEXT:    add x14, x14, :lo12:B+48
+; CHECK-NEXT:    // implicit-def: $q18
 ; CHECK-NEXT:    mov x8, xzr
-; CHECK-NEXT:    adrp x9, B+48
-; CHECK-NEXT:    add x9, x9, :lo12:B+48
+; CHECK-NEXT:    mov w9, #8 // =0x8
 ; CHECK-NEXT:    adrp x10, A
 ; CHECK-NEXT:    add x10, x10, :lo12:A
 ; CHECK-NEXT:    mov x11, xzr
-; CHECK-NEXT:    // kill: killed $q1
-; CHECK-NEXT:    // implicit-def: $q1
+; CHECK-NEXT:    // kill: killed $q18
+; CHECK-NEXT:    // implicit-def: $q18
 ; CHECK-NEXT:    mov x12, xzr
+; CHECK-NEXT:    mov x13, x14
 ; CHECK-NEXT:    // implicit-def: $q0
+; CHECK-NEXT:    // implicit-def: $q2
 ; CHECK-NEXT:    // implicit-def: $q3
 ; CHECK-NEXT:    // implicit-def: $q4
 ; CHECK-NEXT:    // implicit-def: $q5
-; CHECK-NEXT:    // implicit-def: $q7
-; CHECK-NEXT:    // implicit-def: $q10
-; CHECK-NEXT:    // implicit-def: $q17
 ; CHECK-NEXT:    // implicit-def: $q6
-; CHECK-NEXT:    // implicit-def: $q18
+; CHECK-NEXT:    // implicit-def: $q16
+; CHECK-NEXT:    // implicit-def: $q17
+; CHECK-NEXT:    // implicit-def: $q7
 ; CHECK-NEXT:    // implicit-def: $q19
 ; CHECK-NEXT:    // implicit-def: $q20
 ; CHECK-NEXT:    // implicit-def: $q21
 ; CHECK-NEXT:    // implicit-def: $q22
-; CHECK-NEXT:    // implicit-def: $q23
 ; CHECK-NEXT:    // implicit-def: $q24
-; CHECK-NEXT:    // implicit-def: $q9
+; CHECK-NEXT:    // implicit-def: $q23
+; CHECK-NEXT:    // implicit-def: $q25
+; CHECK-NEXT:    // implicit-def: $q26
 ; CHECK-NEXT:    // implicit-def: $q27
-; CHECK-NEXT:    // implicit-def: $q12
-; CHECK-NEXT:    // implicit-def: $q28
-; CHECK-NEXT:    // implicit-def: $q14
-; CHECK-NEXT:    // implicit-def: $q15
-; CHECK-NEXT:    // implicit-def: $q29
 ; CHECK-NEXT:    // implicit-def: $q30
+; CHECK-NEXT:    // implicit-def: $q8
 ; CHECK-NEXT:    // implicit-def: $q11
-; CHECK-NEXT:    // implicit-def: $q31
+; CHECK-NEXT:    // implicit-def: $q12
+; CHECK-NEXT:    // implicit-def: $q29
 ; CHECK-NEXT:    // implicit-def: $q13
-; CHECK-NEXT:    // kill: killed $q1
-; CHECK-NEXT:    // implicit-def: $q1
-; CHECK-NEXT:    // kill: killed $q1
+; CHECK-NEXT:    // implicit-def: $q10
+; CHECK-NEXT:    // implicit-def: $q15
+; CHECK-NEXT:    // kill: killed $q18
+; CHECK-NEXT:    // implicit-def: $q18
+; CHECK-NEXT:    // kill: killed $q18
 ; CHECK-NEXT:  .LBB0_1: // %for.cond1.preheader
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    stp q29, q15, [sp] // 32-byte Folded Spill
-; CHECK-NEXT:    ldr q15, [x8]
+; CHECK-NEXT:    ldr x17, [x8]
 ; CHECK-NEXT:    ldr x15, [x8]
-; CHECK-NEXT:    str q14, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT:    add x20, x10, x11
-; CHECK-NEXT:    mov v8.16b, v28.16b
-; CHECK-NEXT:    fmov x2, d15
-; CHECK-NEXT:    mov x17, v15.d[1]
-; CHECK-NEXT:    ldr q14, [x8]
+; CHECK-NEXT:    mov v18.16b, v0.16b
+; CHECK-NEXT:    ldr x16, [x9]
+; CHECK-NEXT:    stp q15, q4, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    add x5, x10, x11
+; CHECK-NEXT:    mul x1, x15, x17
+; CHECK-NEXT:    ldr x2, [x13], #64
+; CHECK-NEXT:    ldr x5, [x5, #128]
+; CHECK-NEXT:    stp q7, q23, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT:    ldr x14, [x14, #8]
+; CHECK-NEXT:    mul x0, x17, x17
+; CHECK-NEXT:    ldr q23, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT:    mov v9.16b, v30.16b
+; CHECK-NEXT:    mov v30.16b, v25.16b
+; CHECK-NEXT:    mov v25.16b, v20.16b
+; CHECK-NEXT:    mov v20.16b, v6.16b
+; CHECK-NEXT:    mul x18, x16, x17
+; CHECK-NEXT:    mov v6.16b, v1.16b
 ; CHECK-NEXT:    mov v28.16b, v24.16b
-; CHECK-NEXT:    mov v24.16b, v20.16b
-; CHECK-NEXT:    mov v20.16b, v17.16b
-; CHECK-NEXT:    fmov x13, d14
-; CHECK-NEXT:    mov x16, v14.d[1]
-; CHECK-NEXT:    mov v17.16b, v5.16b
-; CHECK-NEXT:    mul x3, x2, x15
-; CHECK-NEXT:    ldr q14, [x9], #64
-; CHECK-NEXT:    ldr q5, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr x6, [x8]
-; CHECK-NEXT:    ldr x20, [x20, #128]
-; CHECK-NEXT:    mul x1, x17, x15
-; CHECK-NEXT:    mov x14, v14.d[1]
-; CHECK-NEXT:    fmov x5, d14
-; CHECK-NEXT:    mov v29.16b, v21.16b
-; CHECK-NEXT:    mov v21.16b, v0.16b
-; CHECK-NEXT:    mov v25.16b, v6.16b
-; CHECK-NEXT:    mul x18, x13, x15
-; CHECK-NEXT:    mov v6.16b, v2.16b
-; CHECK-NEXT:    mov v26.16b, v22.16b
-; CHECK-NEXT:    fmov d15, x3
-; CHECK-NEXT:    mov v22.16b, v18.16b
-; CHECK-NEXT:    mov v18.16b, v7.16b
-; CHECK-NEXT:    mul x0, x16, x15
-; CHECK-NEXT:    mov v7.16b, v3.16b
-; CHECK-NEXT:    mov v16.16b, v4.16b
+; CHECK-NEXT:    fmov d14, x1
+; CHECK-NEXT:    mov v24.16b, v19.16b
+; CHECK-NEXT:    mov v19.16b, v5.16b
+; CHECK-NEXT:    mul x4, x2, x17
+; CHECK-NEXT:    mov v31.16b, v26.16b
+; CHECK-NEXT:    mov v26.16b, v21.16b
+; CHECK-NEXT:    fmov d15, x0
+; CHECK-NEXT:    mov v21.16b, v16.16b
+; CHECK-NEXT:    mov v16.16b, v2.16b
+; CHECK-NEXT:    mov v0.16b, v14.16b
+; CHECK-NEXT:    mul x20, x2, x5
+; CHECK-NEXT:    mov v7.16b, v10.16b
+; CHECK-NEXT:    mov v10.16b, v17.16b
+; CHECK-NEXT:    mov v17.16b, v3.16b
 ; CHECK-NEXT:    add x11, x11, #8
-; CHECK-NEXT:    add x12, x12, #1
-; CHECK-NEXT:    mov v15.d[1], x1
-; CHECK-NEXT:    mul x4, x14, x15
+; CHECK-NEXT:    mov v15.d[1], x18
+; CHECK-NEXT:    mul x3, x14, x17
 ; CHECK-NEXT:    cmp x11, #64
-; CHECK-NEXT:    fmov d14, x18
-; CHECK-NEXT:    mul x15, x5, x15
-; CHECK-NEXT:    add v5.2d, v5.2d, v15.2d
-; CHECK-NEXT:    mul x21, x2, x6
-; CHECK-NEXT:    mov v14.d[1], x0
-; CHECK-NEXT:    mul x2, x2, x20
-; CHECK-NEXT:    fmov d0, x15
-; CHECK-NEXT:    str q5, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT:    ldr q5, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    mul x22, x13, x20
-; CHECK-NEXT:    add v5.2d, v5.2d, v14.2d
-; CHECK-NEXT:    fmov d3, x21
-; CHECK-NEXT:    mul x19, x17, x6
-; CHECK-NEXT:    mov v0.d[1], x4
-; CHECK-NEXT:    fmov d1, x2
-; CHECK-NEXT:    mul x17, x17, x20
-; CHECK-NEXT:    str q5, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT:    add v5.2d, v13.2d, v14.2d
-; CHECK-NEXT:    fmov d2, x22
-; CHECK-NEXT:    ldr q13, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT:    mul x7, x16, x6
-; CHECK-NEXT:    ldp q15, q14, [sp, #16] // 32-byte Folded Reload
-; CHECK-NEXT:    mov v3.d[1], x19
-; CHECK-NEXT:    add v13.2d, v13.2d, v0.2d
-; CHECK-NEXT:    mul x16, x16, x20
-; CHECK-NEXT:    mov v1.d[1], x17
-; CHECK-NEXT:    mul x23, x5, x20
-; CHECK-NEXT:    str q13, [sp, #80] // 16-byte Folded Spill
-; CHECK-NEXT:    mov v13.16b, v5.16b
-; CHECK-NEXT:    mov v5.16b, v17.16b
-; CHECK-NEXT:    mov v17.16b, v20.16b
-; CHECK-NEXT:    mov v20.16b, v24.16b
-; CHECK-NEXT:    mul x13, x13, x6
-; CHECK-NEXT:    mov v24.16b, v28.16b
-; CHECK-NEXT:    add v11.2d, v11.2d, v3.2d
-; CHECK-NEXT:    mov v2.d[1], x16
+; CHECK-NEXT:    mov v0.d[1], x1
+; CHECK-NEXT:    fmov d1, x4
+; CHECK-NEXT:    add x12, x12, #1
+; CHECK-NEXT:    mul x17, x17, x5
+; CHECK-NEXT:    fmov d5, x20
+; CHECK-NEXT:    mul x6, x15, x15
+; CHECK-NEXT:    add v23.2d, v23.2d, v0.2d
+; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    mov v1.d[1], x3
+; CHECK-NEXT:    mul x7, x15, x5
+; CHECK-NEXT:    add v0.2d, v0.2d, v15.2d
+; CHECK-NEXT:    fmov d2, x17
+; CHECK-NEXT:    mul x0, x14, x5
+; CHECK-NEXT:    fmov d4, x6
+; CHECK-NEXT:    mul x19, x16, x5
+; CHECK-NEXT:    stp q0, q23, [sp, #64] // 32-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT:    fmov d3, x7
+; CHECK-NEXT:    ldr q23, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT:    mul x17, x2, x15
+; CHECK-NEXT:    add v0.2d, v0.2d, v15.2d
+; CHECK-NEXT:    ldr q15, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    mov v5.d[1], x0
+; CHECK-NEXT:    mov v4.d[1], x6
+; CHECK-NEXT:    mul x16, x16, x15
+; CHECK-NEXT:    mov v3.d[1], x7
 ; CHECK-NEXT:    add v15.2d, v15.2d, v1.2d
-; CHECK-NEXT:    add v27.2d, v27.2d, v3.2d
-; CHECK-NEXT:    mul x18, x14, x20
-; CHECK-NEXT:    add v23.2d, v23.2d, v3.2d
-; CHECK-NEXT:    add v19.2d, v19.2d, v3.2d
-; CHECK-NEXT:    fmov d4, x23
-; CHECK-NEXT:    add v10.2d, v10.2d, v3.2d
-; CHECK-NEXT:    mul x15, x5, x6
-; CHECK-NEXT:    fmov d0, x13
-; CHECK-NEXT:    add v14.2d, v14.2d, v2.2d
-; CHECK-NEXT:    add v2.2d, v6.2d, v3.2d
-; CHECK-NEXT:    mul x14, x14, x6
-; CHECK-NEXT:    mov v3.16b, v7.16b
-; CHECK-NEXT:    mov v7.16b, v18.16b
-; CHECK-NEXT:    mov v4.d[1], x18
-; CHECK-NEXT:    mov v18.16b, v22.16b
-; CHECK-NEXT:    mov v0.d[1], x7
-; CHECK-NEXT:    fmov d1, x15
-; CHECK-NEXT:    add v28.2d, v8.2d, v4.2d
-; CHECK-NEXT:    mov v1.d[1], x14
-; CHECK-NEXT:    add v31.2d, v31.2d, v0.2d
-; CHECK-NEXT:    add v30.2d, v30.2d, v0.2d
+; CHECK-NEXT:    mov v2.d[1], x19
+; CHECK-NEXT:    str q0, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    mov v1.16b, v6.16b
+; CHECK-NEXT:    mul x14, x14, x15
+; CHECK-NEXT:    mov v6.16b, v20.16b
+; CHECK-NEXT:    mov v20.16b, v25.16b
+; CHECK-NEXT:    fmov d0, x17
+; CHECK-NEXT:    mov v25.16b, v30.16b
+; CHECK-NEXT:    add v30.2d, v9.2d, v5.2d
+; CHECK-NEXT:    mov v5.16b, v19.16b
+; CHECK-NEXT:    mov v19.16b, v24.16b
+; CHECK-NEXT:    add v11.2d, v11.2d, v3.2d
+; CHECK-NEXT:    mov v14.d[1], x16
+; CHECK-NEXT:    mov v3.16b, v17.16b
+; CHECK-NEXT:    mov v17.16b, v10.16b
+; CHECK-NEXT:    mov v10.16b, v7.16b
+; CHECK-NEXT:    add v8.2d, v8.2d, v2.2d
+; CHECK-NEXT:    mov v2.16b, v16.16b
+; CHECK-NEXT:    mov v0.d[1], x14
+; CHECK-NEXT:    mov v16.16b, v21.16b
+; CHECK-NEXT:    mov v21.16b, v26.16b
+; CHECK-NEXT:    add v13.2d, v13.2d, v4.2d
+; CHECK-NEXT:    add v26.2d, v31.2d, v4.2d
+; CHECK-NEXT:    add v24.2d, v28.2d, v4.2d
+; CHECK-NEXT:    add v19.2d, v19.2d, v4.2d
+; CHECK-NEXT:    add v6.2d, v6.2d, v4.2d
+; CHECK-NEXT:    add v1.2d, v1.2d, v4.2d
+; CHECK-NEXT:    ldp q4, q7, [sp, #16] // 32-byte Folded Reload
+; CHECK-NEXT:    add v10.2d, v10.2d, v14.2d
+; CHECK-NEXT:    add v29.2d, v29.2d, v14.2d
+; CHECK-NEXT:    add v27.2d, v27.2d, v14.2d
+; CHECK-NEXT:    add v23.2d, v23.2d, v14.2d
+; CHECK-NEXT:    add v22.2d, v22.2d, v14.2d
+; CHECK-NEXT:    add v20.2d, v20.2d, v14.2d
+; CHECK-NEXT:    add v16.2d, v16.2d, v14.2d
+; CHECK-NEXT:    add v7.2d, v7.2d, v14.2d
+; CHECK-NEXT:    add v5.2d, v5.2d, v14.2d
+; CHECK-NEXT:    add v3.2d, v3.2d, v14.2d
+; CHECK-NEXT:    add v2.2d, v2.2d, v14.2d
 ; CHECK-NEXT:    add v12.2d, v12.2d, v0.2d
-; CHECK-NEXT:    add v24.2d, v24.2d, v0.2d
-; CHECK-NEXT:    add v22.2d, v26.2d, v0.2d
-; CHECK-NEXT:    add v20.2d, v20.2d, v0.2d
-; CHECK-NEXT:    add v18.2d, v18.2d, v0.2d
+; CHECK-NEXT:    add v25.2d, v25.2d, v0.2d
+; CHECK-NEXT:    add v21.2d, v21.2d, v0.2d
 ; CHECK-NEXT:    add v17.2d, v17.2d, v0.2d
-; CHECK-NEXT:    add v7.2d, v7.2d, v0.2d
-; CHECK-NEXT:    add v4.2d, v16.2d, v0.2d
-; CHECK-NEXT:    add v3.2d, v3.2d, v0.2d
-; CHECK-NEXT:    mov v0.16b, v21.16b
-; CHECK-NEXT:    mov v21.16b, v29.16b
-; CHECK-NEXT:    ldr q29, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    add v9.2d, v9.2d, v1.2d
-; CHECK-NEXT:    add v6.2d, v25.2d, v1.2d
-; CHECK-NEXT:    add v5.2d, v5.2d, v1.2d
-; CHECK-NEXT:    add v29.2d, v29.2d, v1.2d
-; CHECK-NEXT:    add v21.2d, v21.2d, v1.2d
-; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    add v4.2d, v4.2d, v0.2d
+; CHECK-NEXT:    add v0.2d, v18.2d, v0.2d
+; CHECK-NEXT:    mov x14, x13
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %for.cond.cleanup
-; CHECK-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp q28, q18, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    adrp x8, C
 ; CHECK-NEXT:    add x8, x8, :lo12:C
-; CHECK-NEXT:    stp q11, q30, [x8, #80]
-; CHECK-NEXT:    ldp x20, x19, [sp, #192] // 16-byte Folded Reload
-; CHECK-NEXT:    str q1, [x8]
-; CHECK-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    ldr x23, [sp, #160] // 8-byte Folded Reload
-; CHECK-NEXT:    stp q15, q14, [x8, #144]
-; CHECK-NEXT:    ldp x22, x21, [sp, #176] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q1, q13, [x8, #16]
-; CHECK-NEXT:    ldr q1, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q28, q12, [x8, #176]
-; CHECK-NEXT:    ldp d13, d12, [sp, #112] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q1, q31, [x8, #48]
-; CHECK-NEXT:    ldp d15, d14, [sp, #96] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q9, q24, [x8, #240]
-; CHECK-NEXT:    ldp d9, d8, [sp, #144] // 16-byte Folded Reload
-; CHECK-NEXT:    stp q19, q18, [x8, #336]
-; CHECK-NEXT:    stp q10, q7, [x8, #400]
-; CHECK-NEXT:    ldp d11, d10, [sp, #128] // 16-byte Folded Reload
-; CHECK-NEXT:    str q29, [x8, #112]
-; CHECK-NEXT:    str q27, [x8, #208]
-; CHECK-NEXT:    stp q23, q22, [x8, #272]
+; CHECK-NEXT:    ldp x20, x19, [sp, #176] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q10, q13, [x8, #64]
+; CHECK-NEXT:    stp q28, q18, [x8]
+; CHECK-NEXT:    ldr q18, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q29, q12, [x8, #96]
+; CHECK-NEXT:    ldp d13, d12, [sp, #128] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q18, q15, [x8, #32]
+; CHECK-NEXT:    ldp d15, d14, [sp, #112] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q11, q8, [x8, #144]
+; CHECK-NEXT:    ldp d9, d8, [sp, #160] // 16-byte Folded Reload
+; CHECK-NEXT:    stp q30, q27, [x8, #176]
+; CHECK-NEXT:    ldp d11, d10, [sp, #144] // 16-byte Folded Reload
+; CHECK-NEXT:    str q26, [x8, #208]
+; CHECK-NEXT:    stp q25, q23, [x8, #240]
+; CHECK-NEXT:    stp q24, q22, [x8, #272]
 ; CHECK-NEXT:    stp q21, q20, [x8, #304]
-; CHECK-NEXT:    stp q6, q17, [x8, #368]
-; CHECK-NEXT:    stp q5, q4, [x8, #432]
-; CHECK-NEXT:    stp q2, q3, [x8, #464]
+; CHECK-NEXT:    stp q19, q7, [x8, #336]
+; CHECK-NEXT:    stp q17, q16, [x8, #368]
+; CHECK-NEXT:    stp q6, q5, [x8, #400]
+; CHECK-NEXT:    stp q4, q3, [x8, #432]
+; CHECK-NEXT:    stp q1, q2, [x8, #464]
 ; CHECK-NEXT:    str q0, [x8, #496]
-; CHECK-NEXT:    add sp, sp, #208
+; CHECK-NEXT:    add sp, sp, #192
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w19
 ; CHECK-NEXT:    .cfi_restore w20
-; CHECK-NEXT:    .cfi_restore w21
-; CHECK-NEXT:    .cfi_restore w22
-; CHECK-NEXT:    .cfi_restore w23
 ; CHECK-NEXT:    .cfi_restore b8
 ; CHECK-NEXT:    .cfi_restore b9
 ; CHECK-NEXT:    .cfi_restore b10
diff --git a/llvm/test/CodeGen/AArch64/scalarize-vector-load.ll b/llvm/test/CodeGen/AArch64/scalarize-vector-load.ll
index eb3a0391eb79e..0ed29b48cf2f8 100644
--- a/llvm/test/CodeGen/AArch64/scalarize-vector-load.ll
+++ b/llvm/test/CodeGen/AArch64/scalarize-vector-load.ll
@@ -4,36 +4,35 @@
 define i8 @scalarize_v16i8(ptr %p) {
 ; CHECK-LABEL: scalarize_v16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    umov w9, v0.b[1]
-; CHECK-NEXT:    umov w10, v0.b[2]
-; CHECK-NEXT:    umov w11, v0.b[3]
-; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    umov w16, v0.b[8]
-; CHECK-NEXT:    umov w17, v0.b[9]
-; CHECK-NEXT:    umov w18, v0.b[10]
-; CHECK-NEXT:    umov w0, v0.b[11]
-; CHECK-NEXT:    umov w1, v0.b[12]
-; CHECK-NEXT:    umov w2, v0.b[13]
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    umov w3, v0.b[14]
-; CHECK-NEXT:    umov w4, v0.b[15]
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w10, w12, w13
-; CHECK-NEXT:    add w11, w14, w15
+; CHECK-NEXT:    ldrb w8, [x0, #3]
+; CHECK-NEXT:    ldrb w9, [x0, #2]
+; CHECK-NEXT:    ldrb w10, [x0, #1]
+; CHECK-NEXT:    ldrb w11, [x0]
+; CHECK-NEXT:    ldrb w13, [x0, #5]
+; CHECK-NEXT:    ldrb w14, [x0, #4]
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    ldrb w12, [x0, #15]
+; CHECK-NEXT:    ldrb w15, [x0, #11]
+; CHECK-NEXT:    add w10, w11, w10
+; CHECK-NEXT:    add w9, w14, w13
+; CHECK-NEXT:    ldrb w11, [x0, #10]
+; CHECK-NEXT:    ldrb w13, [x0, #9]
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    ldrb w14, [x0, #8]
+; CHECK-NEXT:    ldrb w16, [x0, #7]
+; CHECK-NEXT:    add w11, w11, w15
+; CHECK-NEXT:    ldrb w17, [x0, #6]
+; CHECK-NEXT:    ldrb w18, [x0, #14]
+; CHECK-NEXT:    add w13, w14, w13
+; CHECK-NEXT:    ldrb w1, [x0, #13]
+; CHECK-NEXT:    ldrb w0, [x0, #12]
+; CHECK-NEXT:    add w16, w17, w16
+; CHECK-NEXT:    add w10, w13, w11
+; CHECK-NEXT:    add w12, w18, w12
+; CHECK-NEXT:    add w9, w9, w16
+; CHECK-NEXT:    add w14, w0, w1
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w12, w16, w17
-; CHECK-NEXT:    add w13, w18, w0
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w14, w1, w2
-; CHECK-NEXT:    add w10, w12, w13
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w15, w3, w4
-; CHECK-NEXT:    add w11, w14, w15
+; CHECK-NEXT:    add w11, w14, w12
 ; CHECK-NEXT:    add w9, w10, w11
 ; CHECK-NEXT:    add w0, w8, w9
 ; CHECK-NEXT:    ret
@@ -75,22 +74,21 @@ define i8 @scalarize_v16i8(ptr %p) {
 define i8 @scalarize_v8i8(ptr %p) {
 ; CHECK-LABEL: scalarize_v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    umov w9, v0.b[1]
-; CHECK-NEXT:    umov w10, v0.b[2]
-; CHECK-NEXT:    umov w11, v0.b[3]
-; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w14, v0.b[6]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w10, w12, w13
-; CHECK-NEXT:    add w11, w14, w15
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ldrb w8, [x0, #7]
+; CHECK-NEXT:    ldrb w9, [x0, #6]
+; CHECK-NEXT:    ldrb w10, [x0, #5]
+; CHECK-NEXT:    ldrb w11, [x0, #1]
+; CHECK-NEXT:    ldrb w12, [x0]
+; CHECK-NEXT:    ldrb w13, [x0, #4]
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    ldrb w14, [x0, #3]
+; CHECK-NEXT:    ldrb w15, [x0, #2]
+; CHECK-NEXT:    add w11, w12, w11
+; CHECK-NEXT:    add w10, w13, w10
+; CHECK-NEXT:    add w12, w15, w14
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w9, w11, w12
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %wide.load = load <8 x i8>, ptr %p, align 4
   %l0 = extractelement <8 x i8> %wide.load, i32 0
@@ -114,22 +112,21 @@ define i8 @scalarize_v8i8(ptr %p) {
 define i16 @scalarize_v8i16(ptr %p) {
 ; CHECK-LABEL: scalarize_v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    umov w8, v0.h[0]
-; CHECK-NEXT:    umov w9, v0.h[1]
-; CHECK-NEXT:    umov w10, v0.h[2]
-; CHECK-NEXT:    umov w11, v0.h[3]
-; CHECK-NEXT:    umov w12, v0.h[4]
-; CHECK-NEXT:    umov w13, v0.h[5]
-; CHECK-NEXT:    umov w14, v0.h[6]
-; CHECK-NEXT:    umov w15, v0.h[7]
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w10, w12, w13
-; CHECK-NEXT:    add w11, w14, w15
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ldrh w8, [x0, #14]
+; CHECK-NEXT:    ldrh w9, [x0, #12]
+; CHECK-NEXT:    ldrh w10, [x0, #10]
+; CHECK-NEXT:    ldrh w11, [x0, #2]
+; CHECK-NEXT:    ldrh w12, [x0]
+; CHECK-NEXT:    ldrh w13, [x0, #8]
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    ldrh w14, [x0, #6]
+; CHECK-NEXT:    ldrh w15, [x0, #4]
+; CHECK-NEXT:    add w11, w12, w11
+; CHECK-NEXT:    add w10, w13, w10
+; CHECK-NEXT:    add w12, w15, w14
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w9, w11, w12
+; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
   %wide.load = load <8 x i16>, ptr %p, align 4
   %l0 = extractelement <8 x i16> %wide.load, i32 0
@@ -153,14 +150,13 @@ define i16 @scalarize_v8i16(ptr %p) {
 define i16 @scalarize_v4i16(ptr %p) {
 ; CHECK-LABEL: scalarize_v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    umov w8, v0.h[0]
-; CHECK-NEXT:    umov w9, v0.h[1]
-; CHECK-NEXT:    umov w10, v0.h[2]
-; CHECK-NEXT:    umov w11, v0.h[3]
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w9, w10, w11
-; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ldrh w8, [x0, #6]
+; CHECK-NEXT:    ldrh w9, [x0, #4]
+; CHECK-NEXT:    ldrh w10, [x0, #2]
+; CHECK-NEXT:    ldrh w11, [x0]
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w10, w11, w10
+; CHECK-NEXT:    add w0, w10, w8
 ; CHECK-NEXT:    ret
   %wide.load = load <4 x i16>, ptr %p, align 4
   %l0 = extractelement <4 x i16> %wide.load, i32 0
@@ -176,13 +172,10 @@ define i16 @scalarize_v4i16(ptr %p) {
 define i32 @scalarize_v4i32(ptr %p) {
 ; CHECK-LABEL: scalarize_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    mov w9, v0.s[2]
-; CHECK-NEXT:    mov w10, v0.s[3]
-; CHECK-NEXT:    fmov w11, s0
-; CHECK-NEXT:    add w8, w11, w8
-; CHECK-NEXT:    add w9, w9, w10
+; CHECK-NEXT:    ldp w9, w8, [x0]
+; CHECK-NEXT:    ldp w10, w11, [x0, #8]
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w9, w10, w11
 ; CHECK-NEXT:    add w0, w8, w9
 ; CHECK-NEXT:    ret
   %wide.load = load <4 x i32>, ptr %p, align 4
@@ -199,11 +192,10 @@ define i32 @scalarize_v4i32(ptr %p) {
 define i64 @scalarize_v4i64(ptr %p) {
 ; CHECK-LABEL: scalarize_v4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    addp d1, v1.2d
-; CHECK-NEXT:    addp d0, v0.2d
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    ldp x8, x9, [x0]
+; CHECK-NEXT:    ldp x10, x11, [x0, #16]
+; CHECK-NEXT:    add x8, x8, x9
+; CHECK-NEXT:    add x9, x10, x11
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
   %wide.load = load <4 x i64>, ptr %p, align 4
@@ -220,14 +212,11 @@ define i64 @scalarize_v4i64(ptr %p) {
 define i64 @scalarize_v4i32_sext(ptr %p) {
 ; CHECK-LABEL: scalarize_v4i32_sext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    sshll2 v1.2d, v0.4s, #0
-; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
-; CHECK-NEXT:    addp d0, v0.2d
-; CHECK-NEXT:    addp d1, v1.2d
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    add x0, x8, x9
+; CHECK-NEXT:    ldpsw x9, x8, [x0, #8]
+; CHECK-NEXT:    ldpsw x11, x10, [x0]
+; CHECK-NEXT:    add x8, x9, x8
+; CHECK-NEXT:    add x10, x11, x10
+; CHECK-NEXT:    add x0, x10, x8
 ; CHECK-NEXT:    ret
   %wide.load = load <4 x i32>, ptr %p, align 4
   %ext = sext <4 x i32> %wide.load to <4 x i64>
@@ -244,14 +233,11 @@ define i64 @scalarize_v4i32_sext(ptr %p) {
 define i64 @scalarize_v4i32_zext(ptr %p) {
 ; CHECK-LABEL: scalarize_v4i32_zext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    ushll2 v1.2d, v0.4s, #0
-; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-NEXT:    addp d0, v0.2d
-; CHECK-NEXT:    addp d1, v1.2d
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    add x0, x8, x9
+; CHECK-NEXT:    ldp w9, w8, [x0, #8]
+; CHECK-NEXT:    ldp w11, w10, [x0]
+; CHECK-NEXT:    add x8, x9, x8
+; CHECK-NEXT:    add x10, x11, x10
+; CHECK-NEXT:    add x0, x10, x8
 ; CHECK-NEXT:    ret
   %wide.load = load <4 x i32>, ptr %p, align 4
   %ext = zext <4 x i32> %wide.load to <4 x i64>
@@ -340,55 +326,43 @@ define double @scalarize_v4f64(ptr %p) {
 define float @scalarize_into_load(i64 %22, ptr %23, ptr %rawA, ptr %rawB) {
 ; CHECK-LABEL: scalarize_into_load:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldp q1, q0, [x1]
-; CHECK-NEXT:    ldp q3, q2, [x1, #96]
-; CHECK-NEXT:    ldp q5, q4, [x1, #64]
-; CHECK-NEXT:    ldp q7, q6, [x1, #32]
-; CHECK-NEXT:    mov x8, v1.d[1]
-; CHECK-NEXT:    mov x10, v0.d[1]
-; CHECK-NEXT:    mov x1, v3.d[1]
-; CHECK-NEXT:    mov x4, v2.d[1]
-; CHECK-NEXT:    mov x16, v5.d[1]
-; CHECK-NEXT:    mov x18, v4.d[1]
-; CHECK-NEXT:    fmov x9, d1
-; CHECK-NEXT:    mov x12, v7.d[1]
-; CHECK-NEXT:    mov x14, v6.d[1]
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    fmov x13, d7
-; CHECK-NEXT:    fmov x15, d6
-; CHECK-NEXT:    fmov x17, d5
-; CHECK-NEXT:    fmov x0, d4
-; CHECK-NEXT:    fmov x3, d3
-; CHECK-NEXT:    fmov x5, d2
-; CHECK-NEXT:    ldr s0, [x2, x9, lsl #2]
-; CHECK-NEXT:    ldr s1, [x2, x8, lsl #2]
-; CHECK-NEXT:    ldr s2, [x2, x11, lsl #2]
-; CHECK-NEXT:    ldr s3, [x2, x10, lsl #2]
-; CHECK-NEXT:    ldr s4, [x2, x13, lsl #2]
-; CHECK-NEXT:    ldr s5, [x2, x12, lsl #2]
-; CHECK-NEXT:    ldr s6, [x2, x15, lsl #2]
-; CHECK-NEXT:    ldr s7, [x2, x14, lsl #2]
-; CHECK-NEXT:    ldr s16, [x2, x17, lsl #2]
-; CHECK-NEXT:    ldr s17, [x2, x16, lsl #2]
-; CHECK-NEXT:    ldr s18, [x2, x0, lsl #2]
-; CHECK-NEXT:    ldr s19, [x2, x18, lsl #2]
-; CHECK-NEXT:    ldr s20, [x2, x3, lsl #2]
-; CHECK-NEXT:    ldr s21, [x2, x1, lsl #2]
-; CHECK-NEXT:    ldr s22, [x2, x5, lsl #2]
-; CHECK-NEXT:    ldr s23, [x2, x4, lsl #2]
+; CHECK-NEXT:    ldp x8, x9, [x1]
+; CHECK-NEXT:    ldp x10, x11, [x1, #16]
+; CHECK-NEXT:    ldp x12, x13, [x1, #64]
+; CHECK-NEXT:    ldr s0, [x2, x8, lsl #2]
+; CHECK-NEXT:    ldr s1, [x2, x9, lsl #2]
+; CHECK-NEXT:    ldp x8, x9, [x1, #32]
+; CHECK-NEXT:    ldr s2, [x2, x10, lsl #2]
+; CHECK-NEXT:    ldr s3, [x2, x11, lsl #2]
 ; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldr s6, [x2, x12, lsl #2]
+; CHECK-NEXT:    ldp x10, x11, [x1, #48]
+; CHECK-NEXT:    ldr s7, [x2, x13, lsl #2]
 ; CHECK-NEXT:    fadd s1, s2, s3
-; CHECK-NEXT:    fadd s2, s4, s5
-; CHECK-NEXT:    fadd s3, s6, s7
-; CHECK-NEXT:    fadd s4, s16, s17
-; CHECK-NEXT:    fadd s5, s18, s19
-; CHECK-NEXT:    fadd s6, s20, s21
-; CHECK-NEXT:    fadd s7, s22, s23
+; CHECK-NEXT:    ldr s2, [x2, x8, lsl #2]
+; CHECK-NEXT:    ldr s3, [x2, x9, lsl #2]
+; CHECK-NEXT:    ldp x14, x15, [x1, #80]
+; CHECK-NEXT:    fadd s2, s2, s3
+; CHECK-NEXT:    ldr s4, [x2, x10, lsl #2]
+; CHECK-NEXT:    ldr s5, [x2, x11, lsl #2]
+; CHECK-NEXT:    ldp x16, x17, [x1, #96]
+; CHECK-NEXT:    fadd s3, s4, s5
+; CHECK-NEXT:    fadd s4, s6, s7
 ; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldp x18, x0, [x1, #112]
+; CHECK-NEXT:    ldr s16, [x2, x14, lsl #2]
+; CHECK-NEXT:    ldr s17, [x2, x15, lsl #2]
+; CHECK-NEXT:    ldr s18, [x2, x16, lsl #2]
+; CHECK-NEXT:    ldr s19, [x2, x17, lsl #2]
+; CHECK-NEXT:    ldr s20, [x2, x18, lsl #2]
+; CHECK-NEXT:    ldr s21, [x2, x0, lsl #2]
+; CHECK-NEXT:    fadd s5, s16, s17
+; CHECK-NEXT:    fadd s6, s18, s19
 ; CHECK-NEXT:    fadd s1, s2, s3
+; CHECK-NEXT:    fadd s7, s20, s21
 ; CHECK-NEXT:    fadd s2, s4, s5
-; CHECK-NEXT:    fadd s3, s6, s7
 ; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    fadd s3, s6, s7
 ; CHECK-NEXT:    fadd s1, s2, s3
 ; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
@@ -463,57 +437,39 @@ entry:
 define float @scalarize_into_load_sext(i64 %22, ptr %23, ptr %rawA, ptr %rawB) {
 ; CHECK-LABEL: scalarize_into_load_sext:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldp q0, q2, [x1]
-; CHECK-NEXT:    ldp q4, q1, [x1, #32]
-; CHECK-NEXT:    sshll v3.2d, v0.2s, #0
-; CHECK-NEXT:    sshll2 v0.2d, v0.4s, #0
-; CHECK-NEXT:    sshll2 v6.2d, v2.4s, #0
-; CHECK-NEXT:    sshll2 v5.2d, v1.4s, #0
-; CHECK-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-NEXT:    sshll v2.2d, v2.2s, #0
-; CHECK-NEXT:    sshll2 v7.2d, v4.4s, #0
-; CHECK-NEXT:    sshll v4.2d, v4.2s, #0
-; CHECK-NEXT:    mov x8, v3.d[1]
-; CHECK-NEXT:    mov x10, v0.d[1]
-; CHECK-NEXT:    mov x14, v6.d[1]
-; CHECK-NEXT:    mov x12, v2.d[1]
-; CHECK-NEXT:    mov x1, v1.d[1]
-; CHECK-NEXT:    mov x4, v5.d[1]
-; CHECK-NEXT:    mov x16, v4.d[1]
-; CHECK-NEXT:    mov x18, v7.d[1]
-; CHECK-NEXT:    fmov x9, d3
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    fmov x13, d2
-; CHECK-NEXT:    fmov x15, d6
-; CHECK-NEXT:    fmov x17, d4
-; CHECK-NEXT:    fmov x0, d7
-; CHECK-NEXT:    ldr s2, [x2, x8, lsl #2]
-; CHECK-NEXT:    fmov x3, d1
-; CHECK-NEXT:    fmov x5, d5
+; CHECK-NEXT:    ldpsw x9, x8, [x1]
+; CHECK-NEXT:    ldpsw x11, x10, [x1, #8]
+; CHECK-NEXT:    ldpsw x13, x12, [x1, #24]
 ; CHECK-NEXT:    ldr s0, [x2, x9, lsl #2]
-; CHECK-NEXT:    ldr s1, [x2, x11, lsl #2]
+; CHECK-NEXT:    ldr s1, [x2, x8, lsl #2]
+; CHECK-NEXT:    ldpsw x9, x8, [x1, #56]
+; CHECK-NEXT:    ldr s2, [x2, x11, lsl #2]
 ; CHECK-NEXT:    ldr s3, [x2, x10, lsl #2]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldpsw x11, x10, [x1, #48]
+; CHECK-NEXT:    ldpsw x15, x14, [x1, #16]
+; CHECK-NEXT:    ldpsw x17, x16, [x1, #40]
+; CHECK-NEXT:    ldpsw x0, x18, [x1, #32]
+; CHECK-NEXT:    fadd s1, s2, s3
+; CHECK-NEXT:    ldr s2, [x2, x15, lsl #2]
+; CHECK-NEXT:    ldr s3, [x2, x14, lsl #2]
 ; CHECK-NEXT:    ldr s4, [x2, x13, lsl #2]
 ; CHECK-NEXT:    ldr s5, [x2, x12, lsl #2]
-; CHECK-NEXT:    ldr s6, [x2, x15, lsl #2]
-; CHECK-NEXT:    ldr s7, [x2, x14, lsl #2]
 ; CHECK-NEXT:    ldr s16, [x2, x17, lsl #2]
+; CHECK-NEXT:    ldr s6, [x2, x0, lsl #2]
+; CHECK-NEXT:    fadd s2, s2, s3
+; CHECK-NEXT:    ldr s7, [x2, x18, lsl #2]
 ; CHECK-NEXT:    ldr s17, [x2, x16, lsl #2]
-; CHECK-NEXT:    ldr s18, [x2, x0, lsl #2]
-; CHECK-NEXT:    ldr s19, [x2, x18, lsl #2]
-; CHECK-NEXT:    ldr s20, [x2, x3, lsl #2]
-; CHECK-NEXT:    ldr s21, [x2, x1, lsl #2]
-; CHECK-NEXT:    ldr s22, [x2, x5, lsl #2]
-; CHECK-NEXT:    ldr s23, [x2, x4, lsl #2]
-; CHECK-NEXT:    fadd s0, s0, s2
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fadd s2, s4, s5
-; CHECK-NEXT:    fadd s3, s6, s7
-; CHECK-NEXT:    fadd s4, s16, s17
-; CHECK-NEXT:    fadd s5, s18, s19
-; CHECK-NEXT:    fadd s6, s20, s21
-; CHECK-NEXT:    fadd s7, s22, s23
+; CHECK-NEXT:    fadd s3, s4, s5
+; CHECK-NEXT:    ldr s18, [x2, x11, lsl #2]
+; CHECK-NEXT:    ldr s19, [x2, x10, lsl #2]
+; CHECK-NEXT:    fadd s4, s6, s7
 ; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldr s20, [x2, x9, lsl #2]
+; CHECK-NEXT:    ldr s21, [x2, x8, lsl #2]
+; CHECK-NEXT:    fadd s5, s16, s17
+; CHECK-NEXT:    fadd s6, s18, s19
+; CHECK-NEXT:    fadd s7, s20, s21
 ; CHECK-NEXT:    fadd s1, s2, s3
 ; CHECK-NEXT:    fadd s2, s4, s5
 ; CHECK-NEXT:    fadd s3, s6, s7
@@ -593,57 +549,39 @@ entry:
 define float @scalarize_into_load_zext(i64 %22, ptr %23, ptr %rawA, ptr %rawB) {
 ; CHECK-LABEL: scalarize_into_load_zext:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldp q0, q2, [x1]
-; CHECK-NEXT:    ldp q4, q1, [x1, #32]
-; CHECK-NEXT:    ushll v3.2d, v0.2s, #0
-; CHECK-NEXT:    ushll2 v0.2d, v0.4s, #0
-; CHECK-NEXT:    ushll2 v6.2d, v2.4s, #0
-; CHECK-NEXT:    ushll2 v5.2d, v1.4s, #0
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
-; CHECK-NEXT:    ushll2 v7.2d, v4.4s, #0
-; CHECK-NEXT:    ushll v4.2d, v4.2s, #0
-; CHECK-NEXT:    mov x8, v3.d[1]
-; CHECK-NEXT:    mov x10, v0.d[1]
-; CHECK-NEXT:    mov x14, v6.d[1]
-; CHECK-NEXT:    mov x12, v2.d[1]
-; CHECK-NEXT:    mov x1, v1.d[1]
-; CHECK-NEXT:    mov x4, v5.d[1]
-; CHECK-NEXT:    mov x16, v4.d[1]
-; CHECK-NEXT:    mov x18, v7.d[1]
-; CHECK-NEXT:    fmov x9, d3
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    fmov x13, d2
-; CHECK-NEXT:    fmov x15, d6
-; CHECK-NEXT:    fmov x17, d4
-; CHECK-NEXT:    fmov x0, d7
-; CHECK-NEXT:    ldr s2, [x2, x8, lsl #2]
-; CHECK-NEXT:    fmov x3, d1
-; CHECK-NEXT:    fmov x5, d5
+; CHECK-NEXT:    ldp w9, w8, [x1]
+; CHECK-NEXT:    ldp w11, w10, [x1, #8]
+; CHECK-NEXT:    ldp w13, w12, [x1, #24]
 ; CHECK-NEXT:    ldr s0, [x2, x9, lsl #2]
-; CHECK-NEXT:    ldr s1, [x2, x11, lsl #2]
+; CHECK-NEXT:    ldr s1, [x2, x8, lsl #2]
+; CHECK-NEXT:    ldp w9, w8, [x1, #56]
+; CHECK-NEXT:    ldr s2, [x2, x11, lsl #2]
 ; CHECK-NEXT:    ldr s3, [x2, x10, lsl #2]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldp w11, w10, [x1, #48]
+; CHECK-NEXT:    ldp w15, w14, [x1, #16]
+; CHECK-NEXT:    ldp w17, w16, [x1, #40]
+; CHECK-NEXT:    ldp w0, w18, [x1, #32]
+; CHECK-NEXT:    fadd s1, s2, s3
+; CHECK-NEXT:    ldr s2, [x2, x15, lsl #2]
+; CHECK-NEXT:    ldr s3, [x2, x14, lsl #2]
 ; CHECK-NEXT:    ldr s4, [x2, x13, lsl #2]
 ; CHECK-NEXT:    ldr s5, [x2, x12, lsl #2]
-; CHECK-NEXT:    ldr s6, [x2, x15, lsl #2]
-; CHECK-NEXT:    ldr s7, [x2, x14, lsl #2]
 ; CHECK-NEXT:    ldr s16, [x2, x17, lsl #2]
+; CHECK-NEXT:    ldr s6, [x2, x0, lsl #2]
+; CHECK-NEXT:    fadd s2, s2, s3
+; CHECK-NEXT:    ldr s7, [x2, x18, lsl #2]
 ; CHECK-NEXT:    ldr s17, [x2, x16, lsl #2]
-; CHECK-NEXT:    ldr s18, [x2, x0, lsl #2]
-; CHECK-NEXT:    ldr s19, [x2, x18, lsl #2]
-; CHECK-NEXT:    ldr s20, [x2, x3, lsl #2]
-; CHECK-NEXT:    ldr s21, [x2, x1, lsl #2]
-; CHECK-NEXT:    ldr s22, [x2, x5, lsl #2]
-; CHECK-NEXT:    ldr s23, [x2, x4, lsl #2]
-; CHECK-NEXT:    fadd s0, s0, s2
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fadd s2, s4, s5
-; CHECK-NEXT:    fadd s3, s6, s7
-; CHECK-NEXT:    fadd s4, s16, s17
-; CHECK-NEXT:    fadd s5, s18, s19
-; CHECK-NEXT:    fadd s6, s20, s21
-; CHECK-NEXT:    fadd s7, s22, s23
+; CHECK-NEXT:    fadd s3, s4, s5
+; CHECK-NEXT:    ldr s18, [x2, x11, lsl #2]
+; CHECK-NEXT:    ldr s19, [x2, x10, lsl #2]
+; CHECK-NEXT:    fadd s4, s6, s7
 ; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ldr s20, [x2, x9, lsl #2]
+; CHECK-NEXT:    ldr s21, [x2, x8, lsl #2]
+; CHECK-NEXT:    fadd s5, s16, s17
+; CHECK-NEXT:    fadd s6, s18, s19
+; CHECK-NEXT:    fadd s7, s20, s21
 ; CHECK-NEXT:    fadd s1, s2, s3
 ; CHECK-NEXT:    fadd s2, s4, s5
 ; CHECK-NEXT:    fadd s3, s6, s7
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
index 505a40c16653b..d00efa7d99d53 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
@@ -374,8 +374,8 @@ define i8 @call_to_non_streaming_pass_sve_objects(ptr nocapture noundef readnone
 ; CHECK-NEXT:    smstop sm
 ; CHECK-NEXT:    bl foo
 ; CHECK-NEXT:    smstart sm
-; CHECK-NEXT:    ldr z0, [sp, #2, mul vl]
-; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    addvl x8, sp, #2
+; CHECK-NEXT:    ldrb w0, [x8]
 ; CHECK-NEXT:    addvl sp, sp, #3
 ; CHECK-NEXT:    ldp x29, x30, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp d9, d8, [sp, #48] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
index f6ed2e6a787f0..ba7bee9a94bac 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
@@ -19,14 +19,12 @@ define <4 x i32> @load_zext_v4i16i32(ptr %ap) vscale_range(2,0) #0 {
 define <2 x i256> @load_zext_v2i64i256(ptr %ap) #0 {
 ; CHECK-LABEL: load_zext_v2i64i256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldp x0, x4, [x0]
 ; CHECK-NEXT:    mov x1, xzr
 ; CHECK-NEXT:    mov x2, xzr
 ; CHECK-NEXT:    mov x3, xzr
 ; CHECK-NEXT:    mov x5, xzr
 ; CHECK-NEXT:    mov x6, xzr
-; CHECK-NEXT:    mov x4, v0.d[1]
-; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    mov x7, xzr
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %ap
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index ebd32c73ec65b..6fd5b820a2242 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -438,8 +438,7 @@ define void @masked_gather_v32i32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define void @masked_gather_v1i64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-LABEL: masked_gather_v1i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    // implicit-def: $d0
 ; CHECK-NEXT:    cbnz x8, .LBB15_2
 ; CHECK-NEXT:  // %bb.1: // %cond.load
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
index a69808d32ed73..4f5a5a6dee257 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-splat-vector.ll
@@ -727,8 +727,8 @@ define void @load_splat_v4f64(ptr %a, ptr %b) vscale_range(2,2) #0 {
 define void @load_splat_v32i8(ptr %a, ptr %b) vscale_range(2,2) #0 {
 ; CHECK-LABEL: load_splat_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    mov z0.b, b0
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    str z0, [x1]
 ; CHECK-NEXT:    ret
   %v = load <32 x i8>, ptr %a
@@ -740,8 +740,8 @@ define void @load_splat_v32i8(ptr %a, ptr %b) vscale_range(2,2) #0 {
 define void @load_splat_v16i16(ptr %a, ptr %b) vscale_range(2,2) #0 {
 ; CHECK-LABEL: load_splat_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    mov z0.h, h0
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    str z0, [x1]
 ; CHECK-NEXT:    ret
   %v = load <16 x i16>, ptr %a
@@ -753,8 +753,8 @@ define void @load_splat_v16i16(ptr %a, ptr %b) vscale_range(2,2) #0 {
 define void @load_splat_v8i32(ptr %a, ptr %b) vscale_range(2,2) #0 {
 ; CHECK-LABEL: load_splat_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    mov z0.s, s0
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    str z0, [x1]
 ; CHECK-NEXT:    ret
   %v = load <8 x i32>, ptr %a
@@ -766,8 +766,8 @@ define void @load_splat_v8i32(ptr %a, ptr %b) vscale_range(2,2) #0 {
 define void @load_splat_v4i64(ptr %a, ptr %b) vscale_range(2,2) #0 {
 ; CHECK-LABEL: load_splat_v4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr z0, [x0]
-; CHECK-NEXT:    mov z0.d, d0
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    str z0, [x1]
 ; CHECK-NEXT:    ret
   %v = load <4 x i64>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 4d524bc848de6..e433786cfdd1f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -99,16 +99,14 @@ define <2 x i64> @load_zext_v2i32i64(ptr %ap) {
 define <2 x i256> @load_zext_v2i64i256(ptr %ap) {
 ; CHECK-LABEL: load_zext_v2i64i256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldp x8, x4, [x0]
 ; CHECK-NEXT:    mov x1, xzr
 ; CHECK-NEXT:    mov x2, xzr
 ; CHECK-NEXT:    mov x3, xzr
 ; CHECK-NEXT:    mov x5, xzr
 ; CHECK-NEXT:    mov x6, xzr
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    mov x7, xzr
-; CHECK-NEXT:    fmov x4, d1
+; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: load_zext_v2i64i256:
@@ -282,14 +280,12 @@ define <4 x i256> @load_sext_v4i32i256(ptr %ap) {
 define <2 x i256> @load_sext_v2i64i256(ptr %ap) {
 ; CHECK-LABEL: load_sext_v2i64i256:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    mov z1.d, z0.d[1]
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    fmov x4, d1
-; CHECK-NEXT:    asr x1, x0, #63
+; CHECK-NEXT:    ldp x8, x4, [x0]
+; CHECK-NEXT:    asr x1, x8, #63
+; CHECK-NEXT:    asr x5, x4, #63
+; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    mov x2, x1
 ; CHECK-NEXT:    mov x3, x1
-; CHECK-NEXT:    asr x5, x4, #63
 ; CHECK-NEXT:    mov x6, x5
 ; CHECK-NEXT:    mov x7, x5
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
index e6c6003ee6c69..094eaad0cfe80 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ld2-alloca.ll
@@ -115,9 +115,9 @@ define void @alloc_v32i8(ptr %st_ptr) nounwind {
 ; CHECK-NEXT:    adrp x8, .LCPI2_0
 ; CHECK-NEXT:    ldr q0, [sp]
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    ldrb w8, [sp, #16]
 ; CHECK-NEXT:    tbl z0.b, { z0.b }, z1.b
-; CHECK-NEXT:    ldr q1, [sp, #16]
-; CHECK-NEXT:    stur b1, [x19, #8]
+; CHECK-NEXT:    strb w8, [x19, #8]
 ; CHECK-NEXT:    str d0, [x19]
 ; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
index ea6123edc8b4c..7b9b69e0d9b4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
@@ -101,15 +101,13 @@ define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) {
 define void @shuffle_ext_byone_v32i8(ptr %a, ptr %b) {
 ; CHECK-LABEL: shuffle_ext_byone_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0, #16]
-; CHECK-NEXT:    ldp q1, q3, [x1]
-; CHECK-NEXT:    mov z0.b, z0.b[15]
-; CHECK-NEXT:    mov z2.b, z1.b[15]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    insr z1.b, w8
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    insr z3.b, w8
-; CHECK-NEXT:    stp q1, q3, [x0]
+; CHECK-NEXT:    ldp q0, q2, [x1]
+; CHECK-NEXT:    ldrb w8, [x0, #31]
+; CHECK-NEXT:    mov z1.b, z0.b[15]
+; CHECK-NEXT:    insr z0.b, w8
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    insr z2.b, w8
+; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: shuffle_ext_byone_v32i8:
@@ -238,15 +236,13 @@ define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) {
 define void @shuffle_ext_byone_v16i16(ptr %a, ptr %b) {
 ; CHECK-LABEL: shuffle_ext_byone_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0, #16]
-; CHECK-NEXT:    ldp q1, q3, [x1]
-; CHECK-NEXT:    mov z0.h, z0.h[7]
-; CHECK-NEXT:    mov z2.h, z1.h[7]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    insr z1.h, w8
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    insr z3.h, w8
-; CHECK-NEXT:    stp q1, q3, [x0]
+; CHECK-NEXT:    ldp q0, q2, [x1]
+; CHECK-NEXT:    ldrh w8, [x0, #30]
+; CHECK-NEXT:    mov z1.h, z0.h[7]
+; CHECK-NEXT:    insr z0.h, w8
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    insr z2.h, w8
+; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: shuffle_ext_byone_v16i16:
@@ -341,15 +337,13 @@ define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) {
 define void @shuffle_ext_byone_v8i32(ptr %a, ptr %b) {
 ; CHECK-LABEL: shuffle_ext_byone_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0, #16]
-; CHECK-NEXT:    ldp q1, q3, [x1]
-; CHECK-NEXT:    mov z0.s, z0.s[3]
-; CHECK-NEXT:    mov z2.s, z1.s[3]
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    insr z1.s, w8
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    insr z3.s, w8
-; CHECK-NEXT:    stp q1, q3, [x0]
+; CHECK-NEXT:    ldp q0, q2, [x1]
+; CHECK-NEXT:    ldr w8, [x0, #28]
+; CHECK-NEXT:    mov z1.s, z0.s[3]
+; CHECK-NEXT:    insr z0.s, w8
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    insr z2.s, w8
+; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: shuffle_ext_byone_v8i32:
@@ -409,15 +403,13 @@ define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) {
 define void @shuffle_ext_byone_v4i64(ptr %a, ptr %b) {
 ; CHECK-LABEL: shuffle_ext_byone_v4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr q0, [x0, #16]
-; CHECK-NEXT:    ldp q1, q3, [x1]
-; CHECK-NEXT:    mov z0.d, z0.d[1]
-; CHECK-NEXT:    mov z2.d, z1.d[1]
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    insr z1.d, x8
-; CHECK-NEXT:    fmov x8, d2
-; CHECK-NEXT:    insr z3.d, x8
-; CHECK-NEXT:    stp q1, q3, [x0]
+; CHECK-NEXT:    ldp q0, q2, [x1]
+; CHECK-NEXT:    ldr x8, [x0, #24]
+; CHECK-NEXT:    mov z1.d, z0.d[1]
+; CHECK-NEXT:    insr z0.d, x8
+; CHECK-NEXT:    fmov x8, d1
+; CHECK-NEXT:    insr z2.d, x8
+; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
 ;
 ; NONEON-NOSVE-LABEL: shuffle_ext_byone_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/vector-compress.ll b/llvm/test/CodeGen/AArch64/vector-compress.ll
index 9165493863729..55c343164a1b8 100644
--- a/llvm/test/CodeGen/AArch64/vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/vector-compress.ll
@@ -462,10 +462,9 @@ define <3 x i3> @test_compress_narrow_illegal_element_type(<3 x i3> %vec, <3 x i
 ; CHECK-NEXT:    orr x8, x9, x8, lsl #1
 ; CHECK-NEXT:    strh w1, [x10]
 ; CHECK-NEXT:    strh w2, [x8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
-; CHECK-NEXT:    umov.h w0, v0[0]
-; CHECK-NEXT:    umov.h w1, v0[1]
-; CHECK-NEXT:    umov.h w2, v0[2]
+; CHECK-NEXT:    ldrh w0, [sp, #8]
+; CHECK-NEXT:    ldrh w1, [sp, #10]
+; CHECK-NEXT:    ldrh w2, [sp, #12]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
     %out = call <3 x i3> @llvm.experimental.vector.compress(<3 x i3> %vec, <3 x i1> %mask, <3 x i3> undef)

>From 0d43d7c0c7fd2fe5cee69215f394b2b4de18f9fc Mon Sep 17 00:00:00 2001
From: Durgadoss R <durgadossr at nvidia.com>
Date: Tue, 23 Sep 2025 13:03:35 +0530
Subject: [PATCH 12/42] [MLIR][NVVM] Update TMA Load Op (#156347)

This patch adds im2col and gather mode support
to the TMA Load Op. The lowering is also updated
to use intrinsics, except when a predicate is
given. This completes the Blackwell additions to
this Op.

* The NVVM Dialect now supports the Shared::Cluster
   address space, so this patch also updates the Op
   to use AS(7) instead of AS(3). The corresponding
   inline-PTX based unit tests are updated as well.
* lit tests are added for all combinations.

Signed-off-by: Durgadoss R <durgadossr at nvidia.com>
---
 mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td   |  40 ++-
 .../Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp    |  13 +
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp    | 167 +++++++++-
 .../Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir |   3 +-
 .../Conversion/NVVMToLLVM/nvvm-to-llvm.mlir   | 104 +++---
 mlir/test/Dialect/LLVMIR/invalid.mlir         |  31 --
 ...a_load_64x8_8x128_noswizzle-transform.mlir |   4 +-
 .../LLVMIR/nvvm/tma_load_cluster_im2col.mlir  | 298 ++++++++++++++++++
 .../LLVMIR/nvvm/tma_load_cluster_tile.mlir    | 204 ++++++++++++
 .../LLVMIR/nvvm/tma_load_cta_im2col.mlir      | 109 +++++++
 .../Target/LLVMIR/nvvm/tma_load_cta_tile.mlir |  73 +++++
 .../Target/LLVMIR/nvvm/tma_load_invalid.mlir  |  98 ++++++
 12 files changed, 1025 insertions(+), 119 deletions(-)
 create mode 100644 mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_im2col.mlir
 create mode 100644 mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_tile.mlir
 create mode 100644 mlir/test/Target/LLVMIR/nvvm/tma_load_cta_im2col.mlir
 create mode 100644 mlir/test/Target/LLVMIR/nvvm/tma_load_cta_tile.mlir
 create mode 100644 mlir/test/Target/LLVMIR/nvvm/tma_load_invalid.mlir

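For readers skimming the diff, below is a rough sketch of what a plain tile-mode
load looks like once the destination becomes a shared::cluster (AS 7) pointer.
The function name and operand values are made up for illustration, and the
operand order and spelling follow my reading of the pre-existing NVVMToLLVM
tests; the lit tests added by this patch (e.g. tma_load_cluster_tile.mlir,
tma_load_cta_im2col.mlir) are the authoritative reference, including for the
new `mode`, `isCTAOnly` and CTA-group attributes.

  llvm.func @tma_load_2d_tile(%dst : !llvm.ptr<7>, %desc : !llvm.ptr,
                              %bar : !llvm.ptr<3>, %c0 : i32, %c1 : i32) {
    // Default TILE mode at cluster scope; the destination is now a
    // shared::cluster pointer (AS 7) rather than shared::cta (AS 3).
    nvvm.cp.async.bulk.tensor.shared.cluster.global %dst, %desc, %bar, box[%c0, %c1]
        : !llvm.ptr<7>, !llvm.ptr
    llvm.return
  }
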
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 797f8ada9f238..05ca69e404ba9 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -2827,26 +2827,21 @@ def NVVM_CpAsyncBulkTensorGlobalToSharedClusterOp :
   NVVM_Op<"cp.async.bulk.tensor.shared.cluster.global", 
   [DeclareOpInterfaceMethods<BasicPtxBuilderOpInterface>, 
   AttrSizedOperandSegments, NVVMRequiresSM<90>]>,
-  Arguments<(ins  LLVM_PointerShared:$dstMem,
-                  LLVM_AnyPointer:$tmaDescriptor,
+  Arguments<(ins  AnyTypeOf<[LLVM_PointerShared, LLVM_PointerSharedCluster]>:$dstMem,
+                  LLVM_PointerGeneric:$tmaDescriptor,
                   Variadic<I32>:$coordinates,
                   LLVM_PointerShared:$mbar,                  
                   Variadic<I16>:$im2colOffsets,
                   Optional<I16>:$multicastMask,
                   Optional<I64>:$l2CacheHint,
+                  DefaultValuedAttr<TMALoadModeAttr, "TMALoadMode::TILE">:$mode,
+                  DefaultValuedAttr<BoolAttr, "false">:$isCTAOnly,
+                  OptionalAttr<CTAGroupKindAttr>:$group,
                   PtxPredicate:$predicate)> {
   let description = [{
     Initiates an asynchronous copy operation on the tensor data from global 
-    memory to shared memory. 
-
-    The Op operates has two load modes:
-    1) Tiled Mode: It's the default mode. The source multi-dimensional tensor 
-    layout is preserved at the destination. 
-
-    2) Im2col Mode: This mode is used when `im2colOffsets` operands are present.
-    the elements in the Bounding Box of the source tensor are rearranged into
-    columns at the destination. In this mode, the tensor has to be at least 
-    3-dimensional. 
+    memory to shared::cluster (or) shared::cta memory. This Op supports all
+    the load modes specified in `TMALoadMode`.
 
     The `multicastMask` operand is optional. When it is present, the Op copies
     data from global memory to shared memory of multiple CTAs in the cluster.
@@ -2857,6 +2852,10 @@ def NVVM_CpAsyncBulkTensorGlobalToSharedClusterOp :
     The `l2CacheHint` operand is optional, and it is used to specify cache 
     eviction policy that may be used during the memory access.
     
+    When the `isCTAOnly` attribute is set to true, the destination is
+    shared::cta only. Hence, `multicastMask` and `CTAGroup` are not applicable
+    when `isCTAOnly` is true.
+
     [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor)
   }];
 
@@ -2904,6 +2903,23 @@ def NVVM_CpAsyncBulkTensorGlobalToSharedClusterOp :
     }
   }];
   let hasVerifier = 1;
+
+  let extraClassDeclaration = [{
+    bool hasIntrinsic() { return !getPredicate(); }
+
+    bool getAsmValues(RewriterBase &rewriter,
+      llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>> &asmValues);
+
+    static mlir::NVVM::IDArgPair
+    getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
+                          llvm::IRBuilderBase& builder);
+  }];
+
+  string llvmBuilder = [{
+    auto [id, args] = NVVM::CpAsyncBulkTensorGlobalToSharedClusterOp::getIntrinsicIDAndArgs(
+                      *op, moduleTranslation, builder);
+    createIntrinsicCall(builder, id, args);
+  }];
 }
 
 def NVVM_CpAsyncBulkTensorSharedCTAToGlobalOp : 
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index b7e3491117e9b..a9efada28a320 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -993,6 +993,14 @@ struct NVGPUTmaAsyncLoadOpLowering
     auto srcMemrefType = cast<MemRefType>(op.getDst().getType());
     Value dest = getStridedElementPtr(rewriter, op->getLoc(), srcMemrefType,
                                       adaptor.getDst(), {});
+    // The intrinsic takes a shared-cluster pointer, so we need an
+    // address-space cast from 3 to 7.
+    // TODO: Introduce AS(7) in NVGPU.
+    auto ptrSharedClusterType = LLVM::LLVMPointerType::get(
+        op->getContext(),
+        static_cast<unsigned>(NVVM::NVVMMemorySpace::SharedCluster));
+    dest = LLVM::AddrSpaceCastOp::create(b, ptrSharedClusterType, dest);
+
     Value barrier =
         getMbarrierPtr(b, op.getBarriers().getType(), adaptor.getBarriers(),
                        adaptor.getMbarId(), rewriter);
@@ -1001,9 +1009,14 @@ struct NVGPUTmaAsyncLoadOpLowering
     for (auto [index, value] : llvm::enumerate(coords)) {
       coords[index] = truncToI32(b, value);
     }
+
+    // TODO: Enhance the NVGPU Op for other modes too
     rewriter.replaceOpWithNewOp<NVVM::CpAsyncBulkTensorGlobalToSharedClusterOp>(
         op, dest, adaptor.getTensorMapDescriptor(), coords, barrier,
         ValueRange{}, adaptor.getMulticastMask(), Value{},
+        NVVM::TMALoadMode::TILE, // default is TILE mode
+        false,                   // default is cluster-scope
+        nullptr,                 // default is no cta-group
         adaptor.getPredicate());
     return success();
   }
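
With this change, the lowered IR around the copy now looks roughly as follows
(value names are illustrative; compare with the updated nvgpu-to-nvvm.mlir test
below):

  // The shared::cta pointer produced by getStridedElementPtr is cast to the
  // shared::cluster address space before feeding the NVVM op.
  %dst7 = llvm.addrspacecast %dst3 : !llvm.ptr<3> to !llvm.ptr<7>
  nvvm.cp.async.bulk.tensor.shared.cluster.global %dst7, %desc, %bar, box[%c0, %c1] : !llvm.ptr<7>, !llvm.ptr
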
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 13f1dd9a664e5..cc2a656ccb17f 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -45,12 +45,14 @@ using namespace NVVM;
 #include "mlir/Dialect/LLVMIR/NVVMOpsDialect.cpp.inc"
 #include "mlir/Dialect/LLVMIR/NVVMOpsEnums.cpp.inc"
 
+static constexpr unsigned notIntrinsic = llvm::Intrinsic::not_intrinsic;
+
 //===----------------------------------------------------------------------===//
 // Verifier methods
 //===----------------------------------------------------------------------===//
 
 // This verifier is shared among the following Ops:
-// CpAsyncBulkTensorGlobalToSharedClusterOp (TMA Load)
+// CpAsyncBulkTensorSharedCTAToGlobalOp (TMA Store)
 // CpAsyncBulkTensorReduceOp (TMA Store-Reduce)
 static LogicalResult cpAsyncBulkTensorCommonVerifier(size_t tensorDims,
                                                      bool isIm2Col,
@@ -74,13 +76,6 @@ static LogicalResult cpAsyncBulkTensorCommonVerifier(size_t tensorDims,
   return success();
 }
 
-LogicalResult CpAsyncBulkTensorGlobalToSharedClusterOp::verify() {
-  size_t numIm2ColOffsets = getIm2colOffsets().size();
-  bool isIm2Col = numIm2ColOffsets > 0;
-  return cpAsyncBulkTensorCommonVerifier(getCoordinates().size(), isIm2Col,
-                                         numIm2ColOffsets, getLoc());
-}
-
 LogicalResult CpAsyncBulkTensorSharedCTAToGlobalOp::verify() {
   TMAStoreMode mode = getMode();
   // We lower through inline-ptx when getPredicate() is true.
@@ -158,6 +153,38 @@ LogicalResult CpAsyncBulkTensorPrefetchOp::verify() {
                              getMode(), getLoc());
 }
 
+LogicalResult CpAsyncBulkTensorGlobalToSharedClusterOp::verify() {
+  TMALoadMode mode = getMode();
+  bool isCTAOnly = getIsCTAOnly();
+  if (getPredicate()) { // Inline-asm based lowering
+    if (isCTAOnly)
+      return emitError("Predicate is supported only for shared::cluster mode.");
+    if (mode != TMALoadMode::TILE && mode != TMALoadMode::IM2COL)
+      return emitError(
+          "Predicate is supported only for Tile and Im2col modes.");
+  } else { // Intrinsics-based lowering
+    NVVMMemorySpace expectedAS =
+        isCTAOnly ? NVVMMemorySpace::Shared : NVVMMemorySpace::SharedCluster;
+    unsigned AS = llvm::cast<LLVM::LLVMPointerType>(getDstMem().getType())
+                      .getAddressSpace();
+    if (AS != expectedAS)
+      return emitError()
+             << (isCTAOnly
+                     ? "Shared::cta destination requires address-space 3."
+                     : "Shared::cluster destination requires address-space 7.");
+    // Checks specific to shared::cta mode
+    if (isCTAOnly) {
+      if (getMulticastMask())
+        return emitError("Multicast is not supported with shared::cta mode.");
+      if (getGroup())
+        return emitError("CTAGroup is not supported with shared::cta mode.");
+    }
+  }
+
+  return verifyTMALoadParams(getCoordinates().size(), getIm2colOffsets().size(),
+                             getMode(), getLoc());
+}
+
 LogicalResult CpAsyncBulkTensorReduceOp::verify() {
   TMAStoreMode mode = getMode();
   size_t dims = getCoordinates().size();
@@ -1553,6 +1580,130 @@ mlir::NVVM::IDArgPair CpAsyncBulkSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
   return {id, std::move(args)};
 }
 
+bool CpAsyncBulkTensorGlobalToSharedClusterOp::getAsmValues(
+    RewriterBase &rewriter,
+    llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>>
+        &asmValues) {
+  // Add all the operands, but not the attrs, to the asmValues list.
+  // The attrs are only used to select the right intrinsic variants during
+  // intrinsics-lowering, so we ignore them when generating inline-PTX.
+  for (auto val : getOperands())
+    asmValues.push_back({val, mlir::NVVM::PTXRegisterMod::Read});
+
+  return false;
+}
+
+mlir::NVVM::IDArgPair
+CpAsyncBulkTensorGlobalToSharedClusterOp::getIntrinsicIDAndArgs(
+    Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
+  auto thisOp = cast<NVVM::CpAsyncBulkTensorGlobalToSharedClusterOp>(op);
+  const bool isCTAOnly = thisOp.getIsCTAOnly();
+  llvm::SmallVector<llvm::Value *> args;
+
+  // Fill the Intrinsic Args
+  args.push_back(mt.lookupValue(thisOp.getDstMem()));
+  args.push_back(mt.lookupValue(thisOp.getMbar()));
+  args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
+
+  // Coordinates and im2col-offsets
+  for (mlir::Value v : thisOp.getCoordinates())
+    args.push_back(mt.lookupValue(v));
+  for (mlir::Value v : thisOp.getIm2colOffsets())
+    args.push_back(mt.lookupValue(v));
+
+  // MulticastMask, if available
+  mlir::Value mcMask = thisOp.getMulticastMask();
+  const bool hasMC = static_cast<bool>(mcMask);
+  llvm::Value *i16Zero =
+      llvm::ConstantInt::get(llvm::Type::getInt16Ty(mt.getLLVMContext()), 0);
+
+  // CacheHint, if available
+  mlir::Value cacheHint = thisOp.getL2CacheHint();
+  const bool hasCacheHint = static_cast<bool>(cacheHint);
+  llvm::Value *i64Zero =
+      llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
+
+  // Flag argument for CTAGroup.
+  // CTA_1/CTA_2 are mapped to the values 1 and 2 for the intrinsics,
+  // hence the +1 applied to getGroup().
+  const int32_t val =
+      thisOp.getGroup() ? (static_cast<int32_t>(*thisOp.getGroup()) + 1) : 0;
+  llvm::Value *cg =
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(mt.getLLVMContext()), val);
+
+  if (!isCTAOnly) {
+    // For shared::cluster, all the arguments that we build are applicable.
+    args.push_back(hasMC ? mt.lookupValue(mcMask) : i16Zero);
+    args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Zero);
+    args.push_back(builder.getInt1(hasMC));
+    args.push_back(builder.getInt1(hasCacheHint));
+    args.push_back(cg);
+  } else {
+    // For shared::cta, only cache-hint is applicable.
+    args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Zero);
+    args.push_back(builder.getInt1(hasCacheHint));
+  }
+
+  constexpr size_t numDims = 5;  // 1D to 5D
+  constexpr size_t numModes = 5; // Tile, Im2col, w, w_128, gather4
+  using rowTy = std::array<llvm::Intrinsic::ID, numDims + 1>;
+  using TableTy = std::array<rowTy, numModes>;
+  static constexpr TableTy IDTable{
+      {{notIntrinsic, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_gather4_2d}}};
+
+  static constexpr TableTy IDTableCTA{
+      {{notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_1d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_2d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_3d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_4d,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_5d},
+       {notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic,
+        llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_gather4_2d}}};
+
+  static_assert(
+      (getMaxEnumValForTMALoadMode() == std::size(IDTable) - 1) &&
+          (getMaxEnumValForTMALoadMode() == std::size(IDTableCTA) - 1),
+      "TMALoadModes must match number of rows in IDTable and IDTableCTA");
+  size_t mode = static_cast<size_t>(thisOp.getMode());
+  size_t dim = thisOp.getCoordinates().size();
+  auto id = isCTAOnly ? IDTableCTA[mode][dim] : IDTable[mode][dim];
+  assert(id != notIntrinsic &&
+         "Invalid intrinsic for CpAsyncBulkTensorGlobalToSharedClusterOp.");
+
+  return {id, std::move(args)};
+}
+
 mlir::NVVM::IDArgPair CpAsyncBulkTensorPrefetchOp::getIntrinsicIDAndArgs(
     Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
   auto thisOp = cast<NVVM::CpAsyncBulkTensorPrefetchOp>(op);
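
As a concrete reading of the [mode][dim] lookup above (illustrative names; the
shared::cta attribute spelling is assumed), a 2-D tile-mode op selects from
IDTable while the CTA-only variant selects from IDTableCTA:

  // mode = TILE (default), 2 coordinates -> llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d
  nvvm.cp.async.bulk.tensor.shared.cluster.global %dst7, %tma, %bar, box[%crd0, %crd1] : !llvm.ptr<7>, !llvm.ptr
  // Same shape with isCTAOnly (assumed spelling) -> llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.2d
  nvvm.cp.async.bulk.tensor.shared.cluster.global %dst3, %tma, %bar, box[%crd0, %crd1] {isCTAOnly = true} : !llvm.ptr<3>, !llvm.ptr
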
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index 0c500e10bc810..5755ca9258283 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -854,7 +854,8 @@ module @mymodule {
     // CHECK: %[[desc:.+]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
     // CHECK: %[[c8192:.+]] = llvm.mlir.constant(8192 : index) : i64
     // CHECK: %[[shmemOfset:.+]] = llvm.getelementptr %[[desc]][%[[c8192]]] : (!llvm.ptr<3>, i64)
-    // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %[[shmemOfset]], %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}]
+    // CHECK: %[[dest:.+]] = llvm.addrspacecast %[[shmemOfset]] : !llvm.ptr<3> to !llvm.ptr<7>
+    // CHECK: nvvm.cp.async.bulk.tensor.shared.cluster.global %[[dest]], %{{.*}}, %{{.*}}, box[%{{.*}}, %{{.*}}]
     nvgpu.tma.async.load %rhsTensorMap[%c0, %c0], %mbarrier[%c0] to %rhsShmem : !rhsTensorMap, !barrierType -> memref<64x64xf16, strided<[64, 1], offset: 8192>, 3>
     return
   }
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index bf80d9a1668a1..6960e83be3573 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -96,119 +96,93 @@ func.func @cp_async_mbarrier_arrive(%bar_shared: !llvm.ptr<3>, %bar_gen: !llvm.p
 }
 
 // CHECK-LABEL: @tma_load_3d_all
-func.func @tma_load_3d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4} ], [$5],{$6}, $7, $8;", "r,l,r,r,r,r,h,h,l"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint : !llvm.ptr<3>, !llvm.ptr  
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$9 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4} ], [$5],{$6}, $7, $8;", "r,l,r,r,r,r,h,h,l,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_3d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$9 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4} ], [$5],{$6}, $7, $8;", "l,l,r,r,r,r,h,h,l,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_4d_all
-func.func @tma_load_4d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5} ], [$6],{$7,$8}, $9, $10;", "r,l,r,r,r,r,r,h,h,h,l"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] im2col[%off0,%off1] multicast_mask = %ctamask l2_cache_hint = %cacheHint : !llvm.ptr<3>, !llvm.ptr  
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$11 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5} ], [$6],{$7,$8}, $9, $10;", "r,l,r,r,r,r,r,h,h,h,l,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] im2col[%off0,%off1] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_4d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$11 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5} ], [$6],{$7,$8}, $9, $10;", "l,l,r,r,r,r,r,h,h,h,l,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] im2col[%off0,%off1] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_5d_all
-func.func @tma_load_5d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %off0: i16, %off1: i16, %off2: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5,$6} ], [$7],{$8,$9,$10}, $11, $12;", "r,l,r,r,r,r,r,r,h,h,h,h,l"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] im2col[%off0,%off1,%off2] multicast_mask = %ctamask l2_cache_hint = %cacheHint : !llvm.ptr<3>, !llvm.ptr  
-  // CHECK: lvm.inline_asm has_side_effects asm_dialect = att "@$13 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5,$6} ], [$7],{$8,$9,$10}, $11, $12;", "r,l,r,r,r,r,r,r,h,h,h,h,l,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] im2col[%off0,%off1,%off2] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_5d_all(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %off0: i16, %off1: i16, %off2: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$13 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.im2col.multicast::cluster.L2::cache_hint [$0], [$1, {$2,$3,$4,$5,$6} ], [$7],{$8,$9,$10}, $11, $12;", "l,l,r,r,r,r,r,r,h,h,h,h,l,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] im2col[%off0,%off1,%off2] multicast_mask = %ctamask l2_cache_hint = %cacheHint predicate = %p {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_1d
-func.func @tma_load_1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "r,l,r,r"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0] : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$4 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "r,l,r,r,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0] predicate=%p : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$4 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2} ], [$3];", "l,l,r,r,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0] predicate=%p : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_2d
-func.func @tma_load_2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "r,l,r,r,r"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1] : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "r,l,r,r,r,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1] predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3} ], [$4];", "l,l,r,r,r,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor, %barrier, box[%crd0,%crd1] predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_3d
-func.func @tma_load_3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4} ], [$5];", "r,l,r,r,r,r"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4} ], [$5];", "r,l,r,r,r,r,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4} ], [$5];", "l,l,r,r,r,r,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2] predicate=%p : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_4d
-func.func @tma_load_4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5} ], [$6];", "r,l,r,r,r,r,r"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5} ], [$6];", "r,l,r,r,r,r,r,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5} ], [$6];", "l,l,r,r,r,r,r,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_5d
-func.func @tma_load_5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6} ], [$7];", "r,l,r,r,r,r,r,r"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6} ], [$7];", "r,l,r,r,r,r,r,r,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes [$0], [$1, {$2,$3,$4,$5,$6} ], [$7];", "l,l,r,r,r,r,r,r,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd4] predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_multicast1d
-func.func @tma_load_multicast1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2} ], [$3], $4;", "r,l,r,r,h"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0] multicast_mask = %multicastMask : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2} ], [$3], $4;", "r,l,r,r,h,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0] multicast_mask = %multicastMask predicate=%p : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_multicast1d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$5 cp.async.bulk.tensor.1d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2} ], [$3], $4;", "l,l,r,r,h,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0] multicast_mask = %multicastMask predicate=%p : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_multicast2d
-func.func @tma_load_multicast2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3} ], [$4], $5;", "r,l,r,r,r,h"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1] multicast_mask = %multicastMask : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3} ], [$4], $5;", "r,l,r,r,r,h,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1] multicast_mask = %multicastMask  predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_multicast2d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$6 cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3} ], [$4], $5;", "l,l,r,r,r,h,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1] multicast_mask = %multicastMask  predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_multicast3d
-func.func @tma_load_multicast3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4} ], [$5], $6;", "r,l,r,r,r,r,h"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2] multicast_mask = %multicastMask : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4} ], [$5], $6;", "r,l,r,r,r,r,h,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2] multicast_mask = %multicastMask  predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_multicast3d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$7 cp.async.bulk.tensor.3d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4} ], [$5], $6;", "l,l,r,r,r,r,h,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2] multicast_mask = %multicastMask  predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_multicast4d
-func.func @tma_load_multicast4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5} ], [$6], $7;", "r,l,r,r,r,r,r,h"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3] multicast_mask = %multicastMask: !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5} ], [$6], $7;", "r,l,r,r,r,r,r,h,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3] multicast_mask = %multicastMask predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_multicast4d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$8 cp.async.bulk.tensor.4d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5} ], [$6], $7;", "l,l,r,r,r,r,r,h,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3] multicast_mask = %multicastMask predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
 // CHECK-LABEL: @tma_load_multicast5d
-func.func @tma_load_multicast5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5,$6} ], [$7], $8;", "r,l,r,r,r,r,r,r,h"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3,%crd4] multicast_mask = %multicastMask : !llvm.ptr<3>, !llvm.ptr
-  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$9 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5,$6} ], [$7], $8;", "r,l,r,r,r,r,r,r,h,b"
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3,%crd4] multicast_mask = %multicastMask predicate=%p  : !llvm.ptr<3>, !llvm.ptr
+func.func @tma_load_multicast5d(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %multicastMask : i16, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %p : i1) {
+  // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "@$9 cp.async.bulk.tensor.5d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [$0], [$1, {$2,$3,$4,$5,$6} ], [$7], $8;", "l,l,r,r,r,r,r,r,h,b"
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box [%crd0,%crd1,%crd2,%crd3,%crd4] multicast_mask = %multicastMask predicate=%p  : !llvm.ptr<7>, !llvm.ptr
   return
 }
 
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index 749fb634dba76..1adecf264e8f6 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -1720,37 +1720,6 @@ llvm.func @foo(%arg: !llvm.ptr) {
 
 // -----
 
-func.func @tma_load(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // expected-error at +1 {{to use im2col mode, the tensor has to be at least 3-dimensional}}
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint : !llvm.ptr<3>, !llvm.ptr
-  return
-}
-// -----
-
-func.func @tma_load(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // expected-error at +1 {{im2col offsets must be 2 less than number of coordinates}}
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint : !llvm.ptr<3>, !llvm.ptr
-  return
-}
-
-// -----
-
-func.func @tma_load(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // expected-error at +1 {{expects coordinates between 1 to 5 dimension}}
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[]: !llvm.ptr<3>, !llvm.ptr
-  return
-}
-
-// -----
-
-func.func @tma_load(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<3>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64, %p : i1) {
-  // expected-error at +1 {{expects coordinates between 1 to 5 dimension}}
-  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3,%crd0,%crd1,%crd2,%crd3]: !llvm.ptr<3>, !llvm.ptr
-  return
-}
-
-// -----
-
 // expected-error @below {{no_inline and always_inline attributes are incompatible}}
 llvm.func @alwaysinline_noinline() attributes { always_inline, no_inline } {
   llvm.return
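
The new verifier on CpAsyncBulkTensorGlobalToSharedClusterOp also rejects
combinations such as the following sketches (attribute spelling assumed; these
are not part of the updated invalid.mlir):

  // Rejected: "Multicast is not supported with shared::cta mode."
  nvvm.cp.async.bulk.tensor.shared.cluster.global %dst3, %tma, %bar, box[%crd0] multicast_mask = %mask {isCTAOnly = true} : !llvm.ptr<3>, !llvm.ptr
  // Rejected: "Shared::cluster destination requires address-space 7."
  nvvm.cp.async.bulk.tensor.shared.cluster.global %dst3, %tma, %bar, box[%crd0] : !llvm.ptr<3>, !llvm.ptr
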
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
index a42344cb800db..a1e2729146c64 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
@@ -20,8 +20,8 @@
 // Basic PTX check to make sure we are generating the right instructions.
 // CHECK-PTX: mbarrier.init.shared.b64
 // CHECK-PTX: mbarrier.arrive.expect_tx.shared.b64
-// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes
-// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes
+// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes
+// CHECK-PTX: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes
 // CHECK-PTX: mbarrier.arrive.expect_tx.shared.b64
 // CHECK-PTX: mbarrier.try_wait.parity.shared.b64
 
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_im2col.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_im2col.mlir
new file mode 100644
index 0000000000000..2fb98d3c1215e
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_im2col.mlir
@@ -0,0 +1,298 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+llvm.func @tma_load_3d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %off0: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 %8, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 %8, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 0, i64 %8, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %mask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %off0: i16, %off1: i16, %off2: i16, %mask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 %12) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 %12, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 %12, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 %12, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 %12, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 %12, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 %12, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 %11, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+  
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] multicast_mask = %mask {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] {mode = #nvvm.tma_load_mode<im2col>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_3d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col_w(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col_w(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col_w(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_3d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col_w_128(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 0, i64 %9, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i16 %8, i64 %9, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col_w_128(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 0, i64 %10, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i16 %9, i64 %10, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %wHalo: i16, %wOffset: i16, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col_w_128(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 0, i64 %11, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.w.128.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col_w_128>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_tile.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_tile.mlir
new file mode 100644
index 0000000000000..de0b929e6db72
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_load_cluster_tile.mlir
@@ -0,0 +1,204 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+llvm.func @tma_load_1d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_1d_all_tile(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i16 %4, i64 %5) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 %5, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 %5, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 %5, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 %5, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 0, i64 %5, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i16 %4, i64 %5, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_2d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_2d_all_tile(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i16 %5, i64 %6) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 %6, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 %6, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 %6, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 %6, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 0, i64 %6, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i16 %5, i64 %6, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_3d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_all_tile(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 %7, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 %7, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 0, i64 %7, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_4d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_all_tile(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 %8, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 %8, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 %8, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 %8, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 0, i64 %8, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i64 %8, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_5d_all(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %ctamask: i16, %cache: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_all(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] l2_cache_hint = %cache : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask l2_cache_hint = %cache : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] l2_cache_hint = %cache {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask l2_cache_hint = %cache {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] l2_cache_hint = %cache {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] multicast_mask = %ctamask l2_cache_hint = %cache {mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  llvm.return
+}
+
+llvm.func @tma_load_2d_tile_gather4(%tma: !llvm.ptr, %dest: !llvm.ptr<7>, %bar: !llvm.ptr<3>, %row0: i32, %col0: i32, %col1: i32, %col2: i32, %col3: i32, %ctamask: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_2d_tile_gather4(ptr %0, ptr addrspace(7) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 0)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 1)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 0, i1 false, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 0, i1 true, i1 false, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 0, i64 %9, i1 false, i1 true, i32 2)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.gather4.2d(ptr addrspace(7) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i64 %9, i1 true, i1 true, i32 2)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] {mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<tile_gather4>, group = #nvvm.cta_group<cta_2>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_im2col.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_im2col.mlir
new file mode 100644
index 0000000000000..0ebae19a682be
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_im2col.mlir
@@ -0,0 +1,109 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+llvm.func @tma_load_3d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %off0: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i64 %7, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%off0] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9, i1 true)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 0, i1 false)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%off0, %off1] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %off0: i16, %off1: i16, %off2: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 %11, i1 true)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i16 %10, i64 0, i1 false)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%off0, %off1, %off2] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_3d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col_w(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col_w(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col_w(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col_w(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 %10) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 %10, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_3d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_im2col_w_128(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i16 %6, i16 %7, i64 %8, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_4d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_im2col_w_128(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i16 %7, i16 %8, i64 %9, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_5d_im2col_w_128(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %wHalo: i16, %wOffset: i16, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_im2col_w_128(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 %10) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.im2col.w.128.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i16 %8, i16 %9, i64 %10, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] im2col[%wHalo, %wOffset] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w_128>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
\ No newline at end of file
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_tile.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_tile.mlir
new file mode 100644
index 0000000000000..f11de711ca50a
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_load_cta_tile.mlir
@@ -0,0 +1,73 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+llvm.func @tma_load_1d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_1d_all_tile(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i64 %4) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.1d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.1d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i64 %4, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_2d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_2d_all_tile(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i64 %5) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.2d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.2d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i64 %5, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_3d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_3d_all_tile(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i64 %6) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.3d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i64 %6, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_4d_all_tile(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_4d_all_tile(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.4d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i64 %7, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_5d_all(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %crd4: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_5d_all(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.5d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 %8, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] {isCTAOnly = true} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%crd0, %crd1, %crd2, %crd3, %crd4] l2_cache_hint = %cacheHint {isCTAOnly = true} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+llvm.func @tma_load_2d_tile_gather4(%tma: !llvm.ptr, %dest: !llvm.ptr<3>, %bar: !llvm.ptr<3>, %row0: i32, %col0: i32, %col1: i32, %col2: i32, %col3: i32, %cacheHint: i64) {
+  // CHECK-LABEL: define void @tma_load_2d_tile_gather4(ptr %0, ptr addrspace(3) %1, ptr addrspace(3) %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 %8) {
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.gather4.2d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 0, i1 false)
+  // CHECK-NEXT: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.cta.tile.gather4.2d(ptr addrspace(3) %1, ptr addrspace(3) %2, ptr %0, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i64 %8, i1 true)
+  // CHECK-NEXT: ret void
+  // CHECK-NEXT: }
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<3>, !llvm.ptr
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma, %bar, box[%row0, %col0, %col1, %col2, %col3] l2_cache_hint = %cacheHint {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile_gather4>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
\ No newline at end of file
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_load_invalid.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_load_invalid.mlir
new file mode 100644
index 0000000000000..d94ea41f6bb38
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_load_invalid.mlir
@@ -0,0 +1,98 @@
+// RUN: mlir-translate -verify-diagnostics -split-input-file -mlir-to-llvmir %s
+
+// -----
+
+llvm.func @tma_load_1d_im2col(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %ch : i64) {
+  // expected-error @below {{to use im2col mode, the tensor has to be at least 3-dimensional}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0] {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_0d(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<7>, %bar: !llvm.ptr<3>) {
+  // expected-error @below {{expects coordinates between 1 to 5 dimension}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[] : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_gather(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %ch : i64) {
+  // expected-error @below {{Gather4 mode expects 5 coordinates}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0,%crd1,%crd2,%crd3] l2_cache_hint=%ch {mode = #nvvm.tma_load_mode<tile_gather4>}: !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_asm_im2col(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<7>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %p : i1) {
+  // expected-error @below {{Predicate is supported only for Tile and Im2col modes.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] predicate=%p {mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+// -----
+
+llvm.func @tma_load_cta_asm_im2col(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<3>, %bar: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %wHalo: i16, %wOffset: i16, %p : i1) {
+  // expected-error @below {{Predicate is supported only for shared::cluster mode.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0, %crd1, %crd2] im2col[%wHalo, %wOffset] predicate=%p {isCTAOnly = true, mode = #nvvm.tma_load_mode<im2col_w>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_cta_0d(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<3>, %bar : !llvm.ptr<3>) {
+  // expected-error @below {{expects coordinates between 1 to 5 dimension}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[] {isCTAOnly = true} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_cta_mc(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<3>, %bar : !llvm.ptr<3>, %crd0: i32, %ctamask : i16) {
+  // expected-error @below {{Multicast is not supported with shared::cta mode.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0] multicast_mask = %ctamask {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+// -----
+
+llvm.func @tma_load_cta_cg(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<3>, %bar : !llvm.ptr<3>, %crd0: i32, %crd1: i32) {
+  // expected-error @below {{CTAGroup is not supported with shared::cta mode.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0, %crd1] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_cta_with_7(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<7>, %bar : !llvm.ptr<3>, %crd0: i32, %crd1: i32) {
+  // expected-error @below {{Shared::cta destination requires address-space 3.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0, %crd1] {isCTAOnly = true, mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_cluster_with_3(%tma_desc: !llvm.ptr, %dest : !llvm.ptr<3>, %bar : !llvm.ptr<3>, %crd0: i32, %crd1: i32) {
+  // expected-error @below {{Shared::cluster destination requires address-space 7.}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tma_desc, %bar, box[%crd0, %crd1] {isCTAOnly = false, mode = #nvvm.tma_load_mode<tile>, group = #nvvm.cta_group<cta_1>} : !llvm.ptr<3>, !llvm.ptr
+
+  llvm.return
+}
+
+// -----
+
+llvm.func @tma_load_im2col_off(%tmaDescriptor: !llvm.ptr, %dest : !llvm.ptr<7>, %barrier: !llvm.ptr<3>, %crd0: i32, %crd1: i32, %crd2: i32, %crd3: i32, %off0: i16, %off1: i16, %ctamask : i16, %cacheHint : i64) {
+  // expected-error @below {{im2col offsets expected 2 (provided 1)}}
+  nvvm.cp.async.bulk.tensor.shared.cluster.global %dest, %tmaDescriptor,  %barrier, box[%crd0,%crd1,%crd2,%crd3] im2col[%off0] multicast_mask = %ctamask l2_cache_hint = %cacheHint {mode = #nvvm.tma_load_mode<im2col>} : !llvm.ptr<7>, !llvm.ptr
+
+  llvm.return
+}

>From fadaf4fe9e980e251ab6b6b12f0eea2e5fc41df5 Mon Sep 17 00:00:00 2001
From: Stanislav Mekhanoshin <Stanislav.Mekhanoshin at amd.com>
Date: Tue, 23 Sep 2025 00:37:13 -0700
Subject: [PATCH 13/42] [AMDGPU] Add gfx1250 runline to bf16.ll. NFC (#160241)

Note that the true16 version of it does not work, as it fails to
select a mere i16 load.
---
 llvm/test/CodeGen/AMDGPU/bf16.ll | 3238 ++++++++++++++++++++++++++++++
 1 file changed, 3238 insertions(+)
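
For reference, a minimal sketch (not part of this patch; file name and function are assumed) of the
kind of i16 global load the commit message refers to; the exact case that fails instruction selection
under gfx1250 with +real-true16 may differ:

  ; repro-i16-load.ll -- hypothetical stand-alone reproducer
  define i16 @load_i16(ptr addrspace(1) %p) {
    %v = load i16, ptr addrspace(1) %p   ; plain i16 load from global memory
    ret i16 %v
  }
  ; Exercised with, e.g.:
  ;   llc -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < repro-i16-load.ll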

diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 44c719f3635c8..371e460d9638e 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -7,6 +7,10 @@
 ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1010 | FileCheck %s -check-prefixes=GFX10
 ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11TRUE16
 ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck %s -check-prefixes=GFX11,GFX11FAKE16
+; xUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 | FileCheck %s -check-prefixes=GFX1250,GFX1250TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 | FileCheck %s -check-prefixes=GFX1250,GFX1250FAKE16
+
+; FIXME: real-true16 version of gfx1250 test fails
 
 define void @test_load_store(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; GCN-LABEL: test_load_store:
@@ -76,6 +80,15 @@ define void @test_load_store(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11FAKE16-NEXT:    global_store_b16 v[2:3], v0, off
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_u16 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b16 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load bfloat, ptr addrspace(1) %in
   store bfloat %val, ptr addrspace(1) %out
   ret void
@@ -135,6 +148,14 @@ define <2 x bfloat> @v_load_global_v2bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b32 v0, v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b32 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <2 x bfloat>, ptr addrspace(1) %ptr
   ret <2 x bfloat> %load
 }
@@ -195,6 +216,14 @@ define <3 x bfloat> @v_load_global_v3bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b64 v[0:1], v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[0:1], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <3 x bfloat>, ptr addrspace(1) %ptr
   ret <3 x bfloat> %load
 }
@@ -257,6 +286,14 @@ define <4 x bfloat> @v_load_global_v4bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b64 v[0:1], v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[0:1], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <4 x bfloat>, ptr addrspace(1) %ptr
   ret <4 x bfloat> %load
 }
@@ -323,6 +360,14 @@ define <6 x bfloat> @v_load_global_v6bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b96 v[0:2], v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v6bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b96 v[0:2], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <6 x bfloat>, ptr addrspace(1) %ptr
   ret <6 x bfloat> %load
 }
@@ -393,6 +438,14 @@ define <8 x bfloat> @v_load_global_v8bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b128 v[0:3], v[0:1], off
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[0:3], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <8 x bfloat>, ptr addrspace(1) %ptr
   ret <8 x bfloat> %load
 }
@@ -511,6 +564,17 @@ define <16 x bfloat> @v_load_global_v16bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b128 v[4:7], v[4:5], off offset:16
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b128 v[0:3], v[4:5], off
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[4:5], off offset:16
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <16 x bfloat>, ptr addrspace(1) %ptr
   ret <16 x bfloat> %load
 }
@@ -683,6 +747,19 @@ define <32 x bfloat> @v_load_global_v32bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b128 v[12:15], v[12:13], off offset:48
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v13, v1 :: v_dual_mov_b32 v12, v0
+; GFX1250-NEXT:    s_clause 0x3
+; GFX1250-NEXT:    global_load_b128 v[0:3], v[12:13], off
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[12:13], off offset:16
+; GFX1250-NEXT:    global_load_b128 v[8:11], v[12:13], off offset:32
+; GFX1250-NEXT:    global_load_b128 v[12:15], v[12:13], off offset:48
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <32 x bfloat>, ptr addrspace(1) %ptr
   ret <32 x bfloat> %load
 }
@@ -973,6 +1050,23 @@ define <64 x bfloat> @v_load_global_v64bf16(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_load_b128 v[28:31], v[28:29], off offset:112
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_load_global_v64bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v29, v1 :: v_dual_mov_b32 v28, v0
+; GFX1250-NEXT:    s_clause 0x7
+; GFX1250-NEXT:    global_load_b128 v[0:3], v[28:29], off
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[28:29], off offset:16
+; GFX1250-NEXT:    global_load_b128 v[8:11], v[28:29], off offset:32
+; GFX1250-NEXT:    global_load_b128 v[12:15], v[28:29], off offset:48
+; GFX1250-NEXT:    global_load_b128 v[16:19], v[28:29], off offset:64
+; GFX1250-NEXT:    global_load_b128 v[20:23], v[28:29], off offset:80
+; GFX1250-NEXT:    global_load_b128 v[24:27], v[28:29], off offset:96
+; GFX1250-NEXT:    global_load_b128 v[28:31], v[28:29], off offset:112
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <64 x bfloat>, ptr addrspace(1) %ptr
   ret <64 x bfloat> %load
 }
@@ -1042,6 +1136,14 @@ define void @v_store_global_v2bf16(<2 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b32 v[1:2], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <2 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -1115,6 +1217,15 @@ define void @v_store_global_v3bf16(<3 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_store_b16 v[2:3], v1, off offset:4
 ; GFX11-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_store_b16 v[2:3], v1, off offset:4
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <3 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -1183,6 +1294,13 @@ define void @v_store_global_v4bf16(<4 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <4 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -1267,6 +1385,13 @@ define void @v_store_global_v8bf16(<8 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b128 v[4:5], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <8 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -1393,6 +1518,15 @@ define void @v_store_global_v16bf16(<16 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_store_b128 v[8:9], v[4:7], off offset:16
 ; GFX11-NEXT:    global_store_b128 v[8:9], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_store_b128 v[8:9], v[4:7], off offset:16
+; GFX1250-NEXT:    global_store_b128 v[8:9], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <16 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -1610,6 +1744,17 @@ define void @v_store_global_v32bf16(<32 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_store_b128 v[16:17], v[4:7], off offset:16
 ; GFX11-NEXT:    global_store_b128 v[16:17], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x3
+; GFX1250-NEXT:    global_store_b128 v[16:17], v[12:15], off offset:48
+; GFX1250-NEXT:    global_store_b128 v[16:17], v[8:11], off offset:32
+; GFX1250-NEXT:    global_store_b128 v[16:17], v[4:7], off offset:16
+; GFX1250-NEXT:    global_store_b128 v[16:17], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <32 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -2148,6 +2293,26 @@ define void @v_store_global_v64bf16(<64 x bfloat> %val, ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    global_store_b128 v[32:33], v[4:7], off offset:16
 ; GFX11-NEXT:    global_store_b128 v[32:33], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_store_global_v64bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x2
+; GFX1250-NEXT:    scratch_load_b32 v33, off, s32 offset:8
+; GFX1250-NEXT:    scratch_load_b32 v32, off, s32 offset:4
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_clause 0x7
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[28:31], off offset:112
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[24:27], off offset:96
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[20:23], off offset:80
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[16:19], off offset:64
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[12:15], off offset:48
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[8:11], off offset:32
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[4:7], off offset:16
+; GFX1250-NEXT:    global_store_b128 v[32:33], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <64 x bfloat> %val, ptr addrspace(1) %ptr
   ret void
 }
@@ -2227,6 +2392,16 @@ define void @test_store_fpimm(ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) {
 ; GFX11FAKE16-NEXT:    global_store_b16 v[0:1], v4, off
 ; GFX11FAKE16-NEXT:    global_store_b16 v[2:3], v5, off
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_store_fpimm:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_mov_b32_e32 v4, 0x3f80
+; GFX1250-NEXT:    v_mov_b32_e32 v5, 0x4228
+; GFX1250-NEXT:    global_store_b16 v[0:1], v4, off
+; GFX1250-NEXT:    global_store_b16 v[2:3], v5, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store bfloat 1.0, ptr addrspace(1) %ptr0
   store bfloat 42.0, ptr addrspace(1) %ptr1
   ret void
@@ -2330,6 +2505,16 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc_lo
 ; GFX11-NEXT:    global_store_d16_hi_b16 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_f32_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b32 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    global_store_b16 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load float, ptr addrspace(1) %in
   %val.bf16 = fptrunc float %val to bfloat
   store bfloat %val.bf16, ptr addrspace(1) %out
@@ -2488,6 +2673,29 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v5, v4, vcc_lo
 ; GFX11-NEXT:    global_store_d16_hi_b16 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_f64_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[0:1], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_f64_e32 v6, v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
+; GFX1250-NEXT:    v_cmp_gt_f64_e64 s0, |v[0:1]|, |v[4:5]|
+; GFX1250-NEXT:    v_cmp_nlg_f64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s0
+; GFX1250-NEXT:    v_dual_add_nc_u32 v0, v6, v0 :: v_dual_bitop2_b32 v7, 1, v6 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e64 s0, 1, v7
+; GFX1250-NEXT:    s_or_b32 vcc_lo, vcc_lo, s0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc_lo
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    global_store_b16 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load double, ptr addrspace(1) %in
   %val.bf16 = fptrunc double %val to bfloat
   store bfloat %val.bf16, ptr addrspace(1) %out
@@ -2560,6 +2768,16 @@ define void @test_load_store_bf16_to_f32(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_bf16_to_f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_u16 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load bfloat, ptr addrspace(1) %in
   %val.f32 = fpext bfloat %val to float
   store float %val.f32, ptr addrspace(1) %out
@@ -2639,6 +2857,18 @@ define void @test_load_store_bf16_to_f64(ptr addrspace(1) %in, ptr addrspace(1)
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_bf16_to_f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_u16 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load bfloat, ptr addrspace(1) %in
   %val.f64 = fpext bfloat %val to double
   store double %val.f64, ptr addrspace(1) %out
@@ -2705,6 +2935,15 @@ define void @test_load_store_v2bf16(ptr addrspace(1) %in, ptr addrspace(1) %out)
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b32 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load <2 x bfloat>, ptr addrspace(1) %in
   store <2 x bfloat> %val, ptr addrspace(1) %out
   ret void
@@ -2770,6 +3009,15 @@ define void @test_load_store_v4bf16(ptr addrspace(1) %in, ptr addrspace(1) %out)
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[0:1], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load <4 x bfloat>, ptr addrspace(1) %in
   store <4 x bfloat> %val, ptr addrspace(1) %out
   ret void
@@ -2835,6 +3083,15 @@ define void @test_load_store_v8bf16(ptr addrspace(1) %in, ptr addrspace(1) %out)
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    global_store_b128 v[2:3], v[4:7], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load <8 x bfloat>, ptr addrspace(1) %in
   store <8 x bfloat> %val, ptr addrspace(1) %out
   ret void
@@ -2924,6 +3181,19 @@ define void @test_load_store_v16bf16(ptr addrspace(1) %in, ptr addrspace(1) %out
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    global_store_b128 v[2:3], v[8:11], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_load_store_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[0:1], off offset:16
+; GFX1250-NEXT:    global_load_b128 v[8:11], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x1
+; GFX1250-NEXT:    global_store_b128 v[2:3], v[4:7], off offset:16
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b128 v[2:3], v[8:11], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load <16 x bfloat>, ptr addrspace(1) %in
   store <16 x bfloat> %val, ptr addrspace(1) %out
   ret void
@@ -2990,6 +3260,14 @@ define void @test_arg_store(bfloat %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b16 v[1:2], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT:    global_store_b16 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store bfloat %in, ptr addrspace(1) %out
   ret void
 }
@@ -3059,6 +3337,14 @@ define void @test_arg_store_v2bf16(<2 x bfloat> %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b32 v[1:2], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <2 x bfloat> %in, ptr addrspace(1) %out
   ret void
 }
@@ -3132,6 +3418,15 @@ define void @test_arg_store_v3bf16(<3 x bfloat> %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    global_store_b16 v[2:3], v1, off offset:4
 ; GFX11-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_store_b16 v[2:3], v1, off offset:4
+; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <3 x bfloat> %in, ptr addrspace(1) %out
   ret void
 }
@@ -3200,6 +3495,13 @@ define void @test_arg_store_v4bf16(<4 x bfloat> %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <4 x bfloat> %in, ptr addrspace(1)  %out
   ret void
 }
@@ -3284,6 +3586,13 @@ define void @test_arg_store_v8bf16(<8 x bfloat> %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b128 v[4:5], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <8 x bfloat> %in, ptr addrspace(1) %out
   ret void
 }
@@ -3410,6 +3719,15 @@ define void @test_arg_store_v16bf16(<16 x bfloat> %in, ptr addrspace(1) %out) {
 ; GFX11-NEXT:    global_store_b128 v[8:9], v[4:7], off offset:16
 ; GFX11-NEXT:    global_store_b128 v[8:9], v[0:3], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_arg_store_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_store_b128 v[8:9], v[4:7], off offset:16
+; GFX1250-NEXT:    global_store_b128 v[8:9], v[0:3], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store <16 x bfloat> %in, ptr addrspace(1) %out
   ret void
 }
@@ -3477,6 +3795,14 @@ define amdgpu_gfx void @test_inreg_arg_store(bfloat inreg %in, ptr addrspace(1)
 ; GFX11FAKE16-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX11FAKE16-NEXT:    global_store_b16 v[0:1], v2, off
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_inreg_arg_store:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1250-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store bfloat %in, ptr addrspace(1) %out
   ret void
 }
@@ -3539,6 +3865,13 @@ define bfloat @test_byval(ptr addrspace(5) byval(bfloat) %bv, bfloat %val) {
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11FAKE16-NEXT:    scratch_store_b16 off, v0, s32
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_byval:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_store_b16 off, v0, s32
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store bfloat %val, ptr addrspace(5) %bv
   %retval = load bfloat, ptr addrspace(5) %bv
   ret bfloat %retval
@@ -3595,6 +3928,13 @@ define void @test_sret(ptr addrspace(5) sret(bfloat) %sret, bfloat %val) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    scratch_store_b16 v0, v1, off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_sret:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_store_b16 v0, v1, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   store bfloat %val, ptr addrspace(5) %sret
   ret void
 }
@@ -3667,6 +4007,15 @@ define void @test_bitcast_from_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %ou
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11FAKE16-NEXT:    global_store_b16 v[2:3], v0, off
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_bitcast_from_bfloat:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_u16 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b16 v[2:3], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load bfloat, ptr addrspace(1) %in
   %val_int = bitcast bfloat %val to i16
   store i16 %val_int, ptr addrspace(1) %out
@@ -3741,6 +4090,15 @@ define void @test_bitcast_to_bfloat(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11FAKE16-NEXT:    global_store_b16 v[0:1], v2, off
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_bitcast_to_bfloat:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_u16 v2, v[2:3], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %val = load i16, ptr addrspace(1) %in
   %val_fp = bitcast i16 %val to bfloat
   store bfloat %val_fp, ptr addrspace(1) %out
@@ -3777,6 +4135,12 @@ define bfloat @test_ret(bfloat %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret bfloat %in
 }
@@ -3811,6 +4175,12 @@ define <2 x bfloat> @test_ret_v2bf16(<2 x bfloat> %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret_v2bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret <2 x bfloat> %in
 }
@@ -3845,6 +4215,12 @@ define <3 x bfloat> @test_ret_v3bf16(<3 x bfloat> %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret_v3bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret <3 x bfloat> %in
 }
@@ -3879,6 +4255,12 @@ define <4 x bfloat> @test_ret_v4bf16(<4 x bfloat> %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret_v4bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret <4 x bfloat> %in
 }
@@ -3913,6 +4295,12 @@ define <8 x bfloat> @test_ret_v8bf16(<8 x bfloat> %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret_v8bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret <8 x bfloat> %in
 }
@@ -3947,6 +4335,12 @@ define <16 x bfloat> @test_ret_v16bf16(<16 x bfloat> %in) {
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_ret_v16bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   ret <16 x bfloat> %in
 }
@@ -4161,6 +4555,38 @@ define void @test_call(bfloat %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v4, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v4, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_writelane_b32 v4, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b16 v1, v0, off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v4, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v4, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v4, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call bfloat @test_arg_store(bfloat %in)
   store volatile bfloat %result, ptr addrspace(5) %out
@@ -4387,6 +4813,38 @@ define void @test_call_v2bf16(<2 x bfloat> %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call_v2bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v4, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store_v2bf16@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v4, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_writelane_b32 v4, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b32 v1, v0, off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v4, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v4, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v4, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call <2 x bfloat> @test_arg_store_v2bf16(<2 x bfloat> %in)
   store volatile <2 x bfloat> %result, ptr addrspace(5) %out
@@ -4629,6 +5087,41 @@ define void @test_call_v3bf16(<3 x bfloat> %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call_v3bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v5, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store_v2bf16@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v5, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_mov_b32_e32 v4, v2
+; GFX1250-NEXT:    v_writelane_b32 v5, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b16 v4, v1, off offset:4 scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    scratch_store_b32 v4, v0, off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v5, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v5, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v5, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call <3 x bfloat> @test_arg_store_v2bf16(<3 x bfloat> %in)
   store volatile <3 x bfloat> %result, ptr addrspace(5) %out
@@ -4883,6 +5376,39 @@ define void @test_call_v4bf16(<4 x bfloat> %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call_v4bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v5, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store_v2bf16@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v5, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_mov_b32_e32 v4, v2
+; GFX1250-NEXT:    v_writelane_b32 v5, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b64 v4, v[0:1], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v5, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v5, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v5, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call <4 x bfloat> @test_arg_store_v2bf16(<4 x bfloat> %in)
   store volatile <4 x bfloat> %result, ptr addrspace(5) %out
@@ -5190,6 +5716,38 @@ define void @test_call_v8bf16(<8 x bfloat> %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call_v8bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v5, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store_v2bf16@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v5, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_writelane_b32 v5, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b128 v4, v[0:3], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v5, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v5, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v5, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call <8 x bfloat> @test_arg_store_v2bf16(<8 x bfloat> %in)
   store volatile <8 x bfloat> %result, ptr addrspace(5) %out
@@ -5609,6 +6167,40 @@ define void @test_call_v16bf16(<16 x bfloat> %in, ptr addrspace(5) %out) {
 ; GFX11-NEXT:    s_mov_b32 s33, s2
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_call_v16bf16:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 s2, s33
+; GFX1250-NEXT:    s_mov_b32 s33, s32
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_store_b32 off, v9, s33 ; 4-byte Folded Spill
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_get_pc_i64 s[0:1]
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], test_arg_store_v2bf16@gotpcrel+4
+; GFX1250-NEXT:    v_writelane_b32 v9, s30, 0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1250-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX1250-NEXT:    v_writelane_b32 v9, s31, 1
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_swap_pc_i64 s[30:31], s[0:1]
+; GFX1250-NEXT:    scratch_store_b128 v8, v[4:7], off offset:16 scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    scratch_store_b128 v8, v[0:3], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    v_readlane_b32 s31, v9, 1
+; GFX1250-NEXT:    v_readlane_b32 s30, v9, 0
+; GFX1250-NEXT:    s_mov_b32 s32, s33
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_xor_saveexec_b32 s0, -1
+; GFX1250-NEXT:    scratch_load_b32 v9, off, s33 ; 4-byte Folded Reload
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_mov_b32 exec_lo, s0
+; GFX1250-NEXT:    s_mov_b32 s33, s2
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %result = call <16 x bfloat> @test_arg_store_v2bf16(<16 x bfloat> %in)
   store volatile <16 x bfloat> %result, ptr addrspace(5) %out
@@ -5693,6 +6285,16 @@ define bfloat @test_alloca_load_store_ret(bfloat %in) {
 ; GFX11FAKE16-NEXT:    scratch_load_u16 v0, off, s32 glc dlc
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_alloca_load_store_ret:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_store_b16 off, v0, s32 scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    scratch_load_u16 v0, off, s32 scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
 entry:
   %in.addr = alloca bfloat, align 2, addrspace(5)
   store volatile bfloat %in, ptr addrspace(5) %in.addr, align 2
@@ -6105,6 +6707,28 @@ define { <32 x i32>, bfloat } @test_overflow_stack(bfloat %a, <32 x i32> %b) {
 ; GFX11FAKE16-NEXT:    scratch_store_b128 v0, v[26:29], off offset:96
 ; GFX11FAKE16-NEXT:    scratch_store_b16 v0, v1, off offset:128
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_overflow_stack:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x2
+; GFX1250-NEXT:    scratch_load_b32 v33, off, s32 offset:8
+; GFX1250-NEXT:    scratch_load_b32 v32, off, s32 offset:4
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    s_clause 0x5
+; GFX1250-NEXT:    scratch_store_b128 v0, v[22:25], off offset:80
+; GFX1250-NEXT:    scratch_store_b128 v0, v[18:21], off offset:64
+; GFX1250-NEXT:    scratch_store_b128 v0, v[14:17], off offset:48
+; GFX1250-NEXT:    scratch_store_b128 v0, v[10:13], off offset:32
+; GFX1250-NEXT:    scratch_store_b128 v0, v[6:9], off offset:16
+; GFX1250-NEXT:    scratch_store_b128 v0, v[2:5], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_clause 0x2
+; GFX1250-NEXT:    scratch_store_b128 v0, v[30:33], off offset:112
+; GFX1250-NEXT:    scratch_store_b128 v0, v[26:29], off offset:96
+; GFX1250-NEXT:    scratch_store_b16 v0, v1, off offset:128
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %ins.0 = insertvalue { <32 x i32>, bfloat } poison, <32 x i32> %b, 0
   %ins.1 = insertvalue { <32 x i32>, bfloat } %ins.0 ,bfloat %a, 1
   ret { <32 x i32>, bfloat } %ins.1
@@ -6172,6 +6796,16 @@ define <2 x float> @global_extload_v2bf16_to_v2f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v0, 16, v1
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v2bf16_to_v2f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b32 v1, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <2 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <2 x bfloat> %load to <2 x float>
   ret <2 x float> %fpext
@@ -6255,6 +6889,17 @@ define <3 x float> @global_extload_v3bf16_to_v3f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v3bf16_to_v3f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v2
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <3 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <3 x bfloat> %load to <3 x float>
   ret <3 x float> %fpext
@@ -6334,6 +6979,18 @@ define <4 x float> @global_extload_v4bf16_to_v4f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
 ; GFX11-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v4bf16_to_v4f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v2
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <4 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <4 x bfloat> %load to <4 x float>
   ret <4 x float> %fpext
@@ -6423,6 +7080,19 @@ define <5 x float> @global_extload_v5bf16_to_v5f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v5bf16_to_v5f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[2:5], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v2
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <5 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <5 x bfloat> %load to <5 x float>
   ret <5 x float> %fpext
@@ -6527,6 +7197,19 @@ define <6 x float> @global_extload_v6bf16_to_v6f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
 ; GFX11-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v6bf16_to_v6f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b96 v[4:6], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v4 :: v_dual_lshlrev_b32 v2, 16, v5
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v6
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <6 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <6 x bfloat> %load to <6 x float>
   ret <6 x float> %fpext
@@ -6630,6 +7313,21 @@ define <8 x float> @global_extload_v8bf16_to_v8f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 16, v7
 ; GFX11-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v8bf16_to_v8f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v4 :: v_dual_lshlrev_b32 v2, 16, v5
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v6
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v6, 16, v7
+; GFX1250-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <8 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <8 x bfloat> %load to <8 x float>
   ret <8 x float> %fpext
@@ -6797,6 +7495,32 @@ define <16 x float> @global_extload_v16bf16_to_v16f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v14, 16, v15
 ; GFX11-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v16bf16_to_v16f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[0:1], off
+; GFX1250-NEXT:    global_load_b128 v[12:15], v[0:1], off offset:16
+; GFX1250-NEXT:    s_wait_loadcnt 0x1
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v4 :: v_dual_lshlrev_b32 v2, 16, v5
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v6
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v6, 16, v7
+; GFX1250-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v8, 16, v12 :: v_dual_lshlrev_b32 v10, 16, v13
+; GFX1250-NEXT:    v_and_b32_e32 v9, 0xffff0000, v12
+; GFX1250-NEXT:    v_and_b32_e32 v11, 0xffff0000, v13
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v12, 16, v14
+; GFX1250-NEXT:    v_and_b32_e32 v13, 0xffff0000, v14
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v14, 16, v15
+; GFX1250-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <16 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <16 x bfloat> %load to <16 x float>
   ret <16 x float> %fpext
@@ -7088,6 +7812,50 @@ define <32 x float> @global_extload_v32bf16_to_v32f32(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v30, 16, v31
 ; GFX11-NEXT:    v_and_b32_e32 v31, 0xffff0000, v31
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v32bf16_to_v32f32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x3
+; GFX1250-NEXT:    global_load_b128 v[4:7], v[0:1], off
+; GFX1250-NEXT:    global_load_b128 v[12:15], v[0:1], off offset:16
+; GFX1250-NEXT:    global_load_b128 v[20:23], v[0:1], off offset:32
+; GFX1250-NEXT:    global_load_b128 v[28:31], v[0:1], off offset:48
+; GFX1250-NEXT:    s_wait_loadcnt 0x3
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v4 :: v_dual_lshlrev_b32 v2, 16, v5
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v6
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v6
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v6, 16, v7
+; GFX1250-NEXT:    v_and_b32_e32 v7, 0xffff0000, v7
+; GFX1250-NEXT:    s_wait_loadcnt 0x2
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v8, 16, v12 :: v_dual_lshlrev_b32 v10, 16, v13
+; GFX1250-NEXT:    v_and_b32_e32 v9, 0xffff0000, v12
+; GFX1250-NEXT:    v_and_b32_e32 v11, 0xffff0000, v13
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v12, 16, v14
+; GFX1250-NEXT:    v_and_b32_e32 v13, 0xffff0000, v14
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v14, 16, v15
+; GFX1250-NEXT:    v_and_b32_e32 v15, 0xffff0000, v15
+; GFX1250-NEXT:    s_wait_loadcnt 0x1
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v16, 16, v20 :: v_dual_lshlrev_b32 v18, 16, v21
+; GFX1250-NEXT:    v_and_b32_e32 v17, 0xffff0000, v20
+; GFX1250-NEXT:    v_and_b32_e32 v19, 0xffff0000, v21
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v20, 16, v22
+; GFX1250-NEXT:    v_and_b32_e32 v21, 0xffff0000, v22
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v22, 16, v23
+; GFX1250-NEXT:    v_and_b32_e32 v23, 0xffff0000, v23
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v24, 16, v28 :: v_dual_lshlrev_b32 v26, 16, v29
+; GFX1250-NEXT:    v_and_b32_e32 v25, 0xffff0000, v28
+; GFX1250-NEXT:    v_and_b32_e32 v27, 0xffff0000, v29
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v28, 16, v30
+; GFX1250-NEXT:    v_and_b32_e32 v29, 0xffff0000, v30
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v30, 16, v31
+; GFX1250-NEXT:    v_and_b32_e32 v31, 0xffff0000, v31
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <32 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <32 x bfloat> %load to <32 x float>
   ret <32 x float> %fpext
@@ -7179,6 +7947,19 @@ define <2 x double> @global_extload_v2bf16_to_v2f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v2bf16_to_v2f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b32 v0, v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <2 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <2 x bfloat> %load to <2 x double>
   ret <2 x double> %fpext
@@ -7285,6 +8066,22 @@ define <3 x double> @global_extload_v3bf16_to_v3f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v3bf16_to_v3f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[0:1], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v4, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v0
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <3 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <3 x bfloat> %load to <3 x double>
   ret <3 x double> %fpext
@@ -7390,6 +8187,23 @@ define <4 x double> @global_extload_v4bf16_to_v4f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v4bf16_to_v4f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b64 v[2:3], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v2 :: v_dual_lshlrev_b32 v4, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX1250-NEXT:    v_and_b32_e32 v6, 0xffff0000, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <4 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <4 x bfloat> %load to <4 x double>
   ret <4 x double> %fpext
@@ -7509,6 +8323,24 @@ define <5 x double> @global_extload_v5bf16_to_v5f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v5bf16_to_v5f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[2:5], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v2 :: v_dual_lshlrev_b32 v5, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX1250-NEXT:    v_and_b32_e32 v6, 0xffff0000, v3
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v8, 16, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v5
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <5 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <5 x bfloat> %load to <5 x double>
   ret <5 x double> %fpext
@@ -7636,6 +8468,26 @@ define <6 x double> @global_extload_v6bf16_to_v6f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[10:11], v10
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v6bf16_to_v6f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b96 v[4:6], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v4
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v4, 16, v5
+; GFX1250-NEXT:    v_and_b32_e32 v7, 0xffff0000, v5
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v8, 16, v6
+; GFX1250-NEXT:    v_and_b32_e32 v10, 0xffff0000, v6
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v7
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[10:11], v10
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <6 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <6 x bfloat> %load to <6 x double>
   ret <6 x double> %fpext
@@ -7787,6 +8639,28 @@ define <8 x double> @global_extload_v8bf16_to_v8f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[12:13], v12
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[14:15], v14
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v8bf16_to_v8f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_load_b128 v[8:11], v[0:1], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v8 :: v_dual_lshlrev_b32 v4, 16, v9
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v8
+; GFX1250-NEXT:    v_and_b32_e32 v6, 0xffff0000, v9
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v8, 16, v10 :: v_dual_lshlrev_b32 v12, 16, v11
+; GFX1250-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX1250-NEXT:    v_and_b32_e32 v14, 0xffff0000, v11
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[10:11], v10
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[12:13], v12
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[14:15], v14
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <8 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <8 x bfloat> %load to <8 x double>
   ret <8 x double> %fpext
@@ -8050,6 +8924,46 @@ define <16 x double> @global_extload_v16bf16_to_v16f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[28:29], v28
 ; GFX11-NEXT:    v_cvt_f64_f32_e32 v[30:31], v30
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v16bf16_to_v16f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b128 v[8:11], v[0:1], off
+; GFX1250-NEXT:    global_load_b128 v[24:27], v[0:1], off offset:16
+; GFX1250-NEXT:    s_wait_loadcnt 0x1
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v8 :: v_dual_lshlrev_b32 v4, 16, v9
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v8
+; GFX1250-NEXT:    v_and_b32_e32 v6, 0xffff0000, v9
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v8, 16, v10 :: v_dual_lshlrev_b32 v12, 16, v11
+; GFX1250-NEXT:    v_and_b32_e32 v10, 0xffff0000, v10
+; GFX1250-NEXT:    v_and_b32_e32 v14, 0xffff0000, v11
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v16, 16, v24 :: v_dual_lshlrev_b32 v20, 16, v25
+; GFX1250-NEXT:    v_and_b32_e32 v18, 0xffff0000, v24
+; GFX1250-NEXT:    v_and_b32_e32 v22, 0xffff0000, v25
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v24, 16, v26 :: v_dual_lshlrev_b32 v28, 16, v27
+; GFX1250-NEXT:    v_and_b32_e32 v26, 0xffff0000, v26
+; GFX1250-NEXT:    v_and_b32_e32 v30, 0xffff0000, v27
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[0:1], v0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v4
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v6
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v8
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[10:11], v10
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[12:13], v12
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[14:15], v14
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[16:17], v16
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[18:19], v18
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[20:21], v20
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[22:23], v22
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[24:25], v24
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[26:27], v26
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[28:29], v28
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[30:31], v30
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <16 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <16 x bfloat> %load to <16 x double>
   ret <16 x double> %fpext
@@ -9570,6 +10484,131 @@ define <32 x double> @global_extload_v32bf16_to_v32f64(ptr addrspace(1) %ptr) {
 ; GFX11-NEXT:    scratch_store_b128 v0, v[5:8], off offset:16
 ; GFX11-NEXT:    scratch_store_b128 v0, v[1:4], off
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: global_extload_v32bf16_to_v32f64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT:    s_clause 0x1f
+; GFX1250-NEXT:    global_load_u16 v1, v[2:3], off offset:2
+; GFX1250-NEXT:    global_load_u16 v4, v[2:3], off offset:12
+; GFX1250-NEXT:    global_load_u16 v5, v[2:3], off offset:8
+; GFX1250-NEXT:    global_load_u16 v6, v[2:3], off offset:4
+; GFX1250-NEXT:    global_load_u16 v7, v[2:3], off
+; GFX1250-NEXT:    global_load_u16 v8, v[2:3], off offset:6
+; GFX1250-NEXT:    global_load_u16 v9, v[2:3], off offset:10
+; GFX1250-NEXT:    global_load_u16 v10, v[2:3], off offset:14
+; GFX1250-NEXT:    global_load_u16 v11, v[2:3], off offset:18
+; GFX1250-NEXT:    global_load_u16 v12, v[2:3], off offset:62
+; GFX1250-NEXT:    global_load_u16 v13, v[2:3], off offset:60
+; GFX1250-NEXT:    global_load_u16 v14, v[2:3], off offset:58
+; GFX1250-NEXT:    global_load_u16 v15, v[2:3], off offset:56
+; GFX1250-NEXT:    global_load_u16 v16, v[2:3], off offset:28
+; GFX1250-NEXT:    global_load_u16 v17, v[2:3], off offset:24
+; GFX1250-NEXT:    global_load_u16 v18, v[2:3], off offset:20
+; GFX1250-NEXT:    global_load_u16 v19, v[2:3], off offset:16
+; GFX1250-NEXT:    global_load_u16 v20, v[2:3], off offset:22
+; GFX1250-NEXT:    global_load_u16 v21, v[2:3], off offset:26
+; GFX1250-NEXT:    global_load_u16 v22, v[2:3], off offset:30
+; GFX1250-NEXT:    global_load_u16 v23, v[2:3], off offset:34
+; GFX1250-NEXT:    global_load_u16 v24, v[2:3], off offset:44
+; GFX1250-NEXT:    global_load_u16 v25, v[2:3], off offset:40
+; GFX1250-NEXT:    global_load_u16 v26, v[2:3], off offset:36
+; GFX1250-NEXT:    global_load_u16 v27, v[2:3], off offset:32
+; GFX1250-NEXT:    global_load_u16 v28, v[2:3], off offset:38
+; GFX1250-NEXT:    global_load_u16 v29, v[2:3], off offset:42
+; GFX1250-NEXT:    global_load_u16 v30, v[2:3], off offset:46
+; GFX1250-NEXT:    global_load_u16 v31, v[2:3], off offset:50
+; GFX1250-NEXT:    global_load_u16 v32, v[2:3], off offset:52
+; GFX1250-NEXT:    global_load_u16 v33, v[2:3], off offset:48
+; GFX1250-NEXT:    global_load_u16 v34, v[2:3], off offset:54
+; GFX1250-NEXT:    s_wait_loadcnt 0x1e
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v37, 16, v4
+; GFX1250-NEXT:    s_wait_loadcnt 0x1c
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v81, 16, v5 :: v_dual_lshlrev_b32 v85, 16, v6
+; GFX1250-NEXT:    s_wait_loadcnt 0x1a
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v84, 16, v7 :: v_dual_lshlrev_b32 v35, 16, v8
+; GFX1250-NEXT:    s_wait_loadcnt 0x18
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v80, 16, v9 :: v_dual_lshlrev_b32 v36, 16, v10
+; GFX1250-NEXT:    s_wait_loadcnt 0x15
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v12 :: v_dual_lshlrev_b32 v3, 16, v13
+; GFX1250-NEXT:    s_wait_loadcnt 0x14
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v12, 16, v11 :: v_dual_lshlrev_b32 v6, 16, v14
+; GFX1250-NEXT:    s_wait_loadcnt 0x13
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v7, 16, v15
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v2
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v3
+; GFX1250-NEXT:    s_wait_loadcnt 0x11
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v68, 16, v17 :: v_dual_lshlrev_b32 v39, 16, v16
+; GFX1250-NEXT:    s_wait_loadcnt 0xe
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v20, 16, v20
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v6
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[6:7], v7
+; GFX1250-NEXT:    s_wait_loadcnt 0xc
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v21, 16, v21 :: v_dual_lshlrev_b32 v38, 16, v22
+; GFX1250-NEXT:    s_wait_loadcnt 0x9
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v48, 16, v23 :: v_dual_lshlrev_b32 v25, 16, v25
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v24, 16, v24
+; GFX1250-NEXT:    s_wait_loadcnt 0x5
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v49, 16, v28 :: v_dual_lshlrev_b32 v64, 16, v29
+; GFX1250-NEXT:    s_wait_loadcnt 0x3
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v50, 16, v30 :: v_dual_lshlrev_b32 v51, 16, v31
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v33, 16, v33 :: v_dual_lshlrev_b32 v52, 16, v34
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v32, 16, v32 :: v_dual_lshlrev_b32 v69, 16, v27
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v70, 16, v26
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[14:15], v35
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[54:55], v52
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[52:53], v32
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[30:31], v38
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[28:29], v39
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[34:35], v48
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[38:39], v49
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[48:49], v33
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v13, 16, v19 :: v_dual_lshlrev_b32 v82, 16, v18
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[66:67], v64
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[64:65], v25
+; GFX1250-NEXT:    scratch_store_b128 v0, v[2:5], off offset:240
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[4:5], v50
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[50:51], v51
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[2:3], v24
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[18:19], v36
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[16:17], v37
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[36:37], v70
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[32:33], v69
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[70:71], v21
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[68:69], v68
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[26:27], v20
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[24:25], v82
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[22:23], v12
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[20:21], v13
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[82:83], v80
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[80:81], v81
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[12:13], v85
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[10:11], v1
+; GFX1250-NEXT:    scratch_store_b128 v0, v[6:9], off offset:224
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_cvt_f64_f32_e32 v[8:9], v84
+; GFX1250-NEXT:    s_clause 0xd
+; GFX1250-NEXT:    scratch_store_b128 v0, v[52:55], off offset:208
+; GFX1250-NEXT:    scratch_store_b128 v0, v[48:51], off offset:192
+; GFX1250-NEXT:    scratch_store_b128 v0, v[2:5], off offset:176
+; GFX1250-NEXT:    scratch_store_b128 v0, v[64:67], off offset:160
+; GFX1250-NEXT:    scratch_store_b128 v0, v[36:39], off offset:144
+; GFX1250-NEXT:    scratch_store_b128 v0, v[32:35], off offset:128
+; GFX1250-NEXT:    scratch_store_b128 v0, v[28:31], off offset:112
+; GFX1250-NEXT:    scratch_store_b128 v0, v[68:71], off offset:96
+; GFX1250-NEXT:    scratch_store_b128 v0, v[24:27], off offset:80
+; GFX1250-NEXT:    scratch_store_b128 v0, v[20:23], off offset:64
+; GFX1250-NEXT:    scratch_store_b128 v0, v[16:19], off offset:48
+; GFX1250-NEXT:    scratch_store_b128 v0, v[80:83], off offset:32
+; GFX1250-NEXT:    scratch_store_b128 v0, v[12:15], off offset:16
+; GFX1250-NEXT:    scratch_store_b128 v0, v[8:11], off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %load = load <32 x bfloat>, ptr addrspace(1) %ptr
   %fpext = fpext <32 x bfloat> %load to <32 x double>
   ret <32 x double> %fpext
@@ -9686,6 +10725,16 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd bfloat %a, %b
   ret bfloat %op
 }
@@ -9859,6 +10908,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <2 x bfloat> %a, %b
   ret <2 x bfloat> %op
 }
@@ -10093,6 +11149,14 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_add_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <3 x bfloat> %a, %b
   ret <3 x bfloat> %op
 }
@@ -10383,6 +11447,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_add_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <4 x bfloat> %a, %b
   ret <4 x bfloat> %op
 }
@@ -10921,6 +11993,16 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v3, v3, v8, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v4
+; GFX1250-NEXT:    v_pk_add_bf16 v1, v1, v5
+; GFX1250-NEXT:    v_pk_add_bf16 v2, v2, v6
+; GFX1250-NEXT:    v_pk_add_bf16 v3, v3, v7
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <8 x bfloat> %a, %b
   ret <8 x bfloat> %op
 }
@@ -11951,6 +13033,20 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v4, v4, v13, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v8
+; GFX1250-NEXT:    v_pk_add_bf16 v1, v1, v9
+; GFX1250-NEXT:    v_pk_add_bf16 v2, v2, v10
+; GFX1250-NEXT:    v_pk_add_bf16 v3, v3, v11
+; GFX1250-NEXT:    v_pk_add_bf16 v4, v4, v12
+; GFX1250-NEXT:    v_pk_add_bf16 v5, v5, v13
+; GFX1250-NEXT:    v_pk_add_bf16 v6, v6, v14
+; GFX1250-NEXT:    v_pk_add_bf16 v7, v7, v15
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <16 x bfloat> %a, %b
   ret <16 x bfloat> %op
 }
@@ -14043,6 +15139,30 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v16
+; GFX1250-NEXT:    v_pk_add_bf16 v1, v1, v17
+; GFX1250-NEXT:    v_pk_add_bf16 v2, v2, v18
+; GFX1250-NEXT:    v_pk_add_bf16 v3, v3, v19
+; GFX1250-NEXT:    v_pk_add_bf16 v4, v4, v20
+; GFX1250-NEXT:    v_pk_add_bf16 v5, v5, v21
+; GFX1250-NEXT:    v_pk_add_bf16 v6, v6, v22
+; GFX1250-NEXT:    v_pk_add_bf16 v7, v7, v23
+; GFX1250-NEXT:    v_pk_add_bf16 v8, v8, v24
+; GFX1250-NEXT:    v_pk_add_bf16 v9, v9, v25
+; GFX1250-NEXT:    v_pk_add_bf16 v10, v10, v26
+; GFX1250-NEXT:    v_pk_add_bf16 v11, v11, v27
+; GFX1250-NEXT:    v_pk_add_bf16 v12, v12, v28
+; GFX1250-NEXT:    v_pk_add_bf16 v13, v13, v29
+; GFX1250-NEXT:    v_pk_add_bf16 v14, v14, v30
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v15, v15, v31
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fadd <32 x bfloat> %a, %b
   ret <32 x bfloat> %op
 }
@@ -14147,6 +15267,16 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_bf16_fpimm_0:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_f32_e32 v0, 1.0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %add = fadd bfloat %arg0, 1.0
   ret bfloat %add
 }
@@ -14251,6 +15381,16 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fadd_bf16_fpimm_1:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_f32_e32 v0, 0x42280000, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %add = fadd bfloat %arg0, 42.0
   ret bfloat %add
 }
@@ -14366,6 +15506,16 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fsub_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fsub bfloat %a, %b
   ret bfloat %op
 }
@@ -14539,6 +15689,13 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fsub_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_add_bf16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fsub <2 x bfloat> %a, %b
   ret <2 x bfloat> %op
 }
@@ -14773,6 +15930,22 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fsub_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v4, 0xffff0000, v2
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v2 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_dual_sub_f32 v4, v5, v4 :: v_dual_lshlrev_b32 v1, 16, v1
+; GFX1250-NEXT:    v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v4
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fsub <3 x bfloat> %a, %b
   ret <3 x bfloat> %op
 }
@@ -15063,6 +16236,25 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fsub_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v4, 0xffff0000, v3
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v1
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX1250-NEXT:    v_and_b32_e32 v6, 0xffff0000, v2
+; GFX1250-NEXT:    v_and_b32_e32 v7, 0xffff0000, v0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v2 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    v_dual_sub_f32 v4, v5, v4 :: v_dual_lshlrev_b32 v1, 16, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_sub_f32_e32 v5, v7, v6
+; GFX1250-NEXT:    v_dual_sub_f32 v0, v0, v2 :: v_dual_sub_f32 v1, v1, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v5
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, v4
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fsub <4 x bfloat> %a, %b
   ret <4 x bfloat> %op
 }
@@ -15178,6 +16370,13 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_fma_mixlo_bf16 v0, v0, v1, 0 op_sel_hi:[1,1,0]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul bfloat %a, %b
   ret bfloat %op
 }
@@ -15351,6 +16550,13 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <2 x bfloat> %a, %b
   ret <2 x bfloat> %op
 }
@@ -15585,6 +16791,14 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_mul_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <3 x bfloat> %a, %b
   ret <3 x bfloat> %op
 }
@@ -15875,6 +17089,14 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_mul_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <4 x bfloat> %a, %b
   ret <4 x bfloat> %op
 }
@@ -16413,6 +17635,16 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v3, v3, v8, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v4
+; GFX1250-NEXT:    v_pk_mul_bf16 v1, v1, v5
+; GFX1250-NEXT:    v_pk_mul_bf16 v2, v2, v6
+; GFX1250-NEXT:    v_pk_mul_bf16 v3, v3, v7
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <8 x bfloat> %a, %b
   ret <8 x bfloat> %op
 }
@@ -17443,6 +18675,20 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v4, v4, v13, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v8
+; GFX1250-NEXT:    v_pk_mul_bf16 v1, v1, v9
+; GFX1250-NEXT:    v_pk_mul_bf16 v2, v2, v10
+; GFX1250-NEXT:    v_pk_mul_bf16 v3, v3, v11
+; GFX1250-NEXT:    v_pk_mul_bf16 v4, v4, v12
+; GFX1250-NEXT:    v_pk_mul_bf16 v5, v5, v13
+; GFX1250-NEXT:    v_pk_mul_bf16 v6, v6, v14
+; GFX1250-NEXT:    v_pk_mul_bf16 v7, v7, v15
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <16 x bfloat> %a, %b
   ret <16 x bfloat> %op
 }
@@ -19535,6 +20781,30 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmul_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    v_pk_mul_bf16 v0, v0, v16
+; GFX1250-NEXT:    v_pk_mul_bf16 v1, v1, v17
+; GFX1250-NEXT:    v_pk_mul_bf16 v2, v2, v18
+; GFX1250-NEXT:    v_pk_mul_bf16 v3, v3, v19
+; GFX1250-NEXT:    v_pk_mul_bf16 v4, v4, v20
+; GFX1250-NEXT:    v_pk_mul_bf16 v5, v5, v21
+; GFX1250-NEXT:    v_pk_mul_bf16 v6, v6, v22
+; GFX1250-NEXT:    v_pk_mul_bf16 v7, v7, v23
+; GFX1250-NEXT:    v_pk_mul_bf16 v8, v8, v24
+; GFX1250-NEXT:    v_pk_mul_bf16 v9, v9, v25
+; GFX1250-NEXT:    v_pk_mul_bf16 v10, v10, v26
+; GFX1250-NEXT:    v_pk_mul_bf16 v11, v11, v27
+; GFX1250-NEXT:    v_pk_mul_bf16 v12, v12, v28
+; GFX1250-NEXT:    v_pk_mul_bf16 v13, v13, v29
+; GFX1250-NEXT:    v_pk_mul_bf16 v14, v14, v30
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_pk_mul_bf16 v15, v15, v31
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fmul <32 x bfloat> %a, %b
   ret <32 x bfloat> %op
 }
@@ -19741,6 +21011,32 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fdiv_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_div_scale_f32 v2, null, v1, v1, v0
+; GFX1250-NEXT:    v_rcp_f32_e32 v3, v2
+; GFX1250-NEXT:    v_nop
+; GFX1250-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_fma_f32 v4, -v2, v3, 1.0
+; GFX1250-NEXT:    v_fmac_f32_e32 v3, v4, v3
+; GFX1250-NEXT:    v_div_scale_f32 v4, vcc_lo, v0, v1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_mul_f32_e32 v5, v4, v3
+; GFX1250-NEXT:    v_fma_f32 v6, -v2, v5, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_fmac_f32_e32 v5, v6, v3
+; GFX1250-NEXT:    v_fma_f32 v2, -v2, v5, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_div_fmas_f32 v2, v2, v3, v5
+; GFX1250-NEXT:    v_div_fixup_f32 v0, v2, v1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fdiv bfloat %a, %b
   ret bfloat %op
 }
@@ -19795,6 +21091,13 @@ define bfloat @v_fabs_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fabs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.fabs.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -19838,6 +21141,13 @@ define amdgpu_ps i32 @s_fabs_bf16(bfloat inreg %a) {
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_fabs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_and_b32 s0, s0, 0x7fff
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX1250-NEXT:    ; return to shader part epilog
   %op = call bfloat @llvm.fabs.bf16(bfloat %a)
   %cast = bitcast bfloat %op to i16
   %zext = zext i16 %cast to i32
@@ -19887,6 +21197,13 @@ define bfloat @v_fneg_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11FAKE16-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fneg_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fneg bfloat %a
   ret bfloat %op
 }
@@ -19933,6 +21250,13 @@ define amdgpu_ps i32 @s_fneg_bf16(bfloat inreg %a) {
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_fneg_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_xor_b32 s0, s0, 0x8000
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX1250-NEXT:    ; return to shader part epilog
   %op = fneg bfloat %a
   %cast = bitcast bfloat %op to i16
   %zext = zext i16 %cast to i32
@@ -19992,6 +21316,13 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11FAKE16-NEXT:    v_or_b32_e32 v0, 0x8000, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fneg_fabs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_or_b32_e32 v0, 0x8000, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %fabs = call bfloat @llvm.fabs.bf16(bfloat %a)
   %op = fneg bfloat %fabs
   ret bfloat %op
@@ -20045,6 +21376,13 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_and_b32 s0, 0xffff, s0
 ; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_fneg_fabs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_bitset1_b32 s0, 15
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_and_b32 s0, 0xffff, s0
+; GFX1250-NEXT:    ; return to shader part epilog
   %fabs = call bfloat @llvm.fabs.bf16(bfloat %a)
   %op = fneg bfloat %fabs
   %cast = bitcast bfloat %op to i16
@@ -20172,6 +21510,16 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_min_num_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.minnum.bf16(bfloat %a, bfloat %b)
   ret bfloat %op
 }
@@ -20345,6 +21693,13 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
   ret <2 x bfloat> %op
 }
@@ -20579,6 +21934,14 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_min_num_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <3 x bfloat> @llvm.minnum.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b)
   ret <3 x bfloat> %op
 }
@@ -20869,6 +22232,14 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_min_num_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <4 x bfloat> @llvm.minnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b)
   ret <4 x bfloat> %op
 }
@@ -21407,6 +22778,16 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v3, v3, v8, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v4
+; GFX1250-NEXT:    v_pk_min_num_bf16 v1, v1, v5
+; GFX1250-NEXT:    v_pk_min_num_bf16 v2, v2, v6
+; GFX1250-NEXT:    v_pk_min_num_bf16 v3, v3, v7
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <8 x bfloat> @llvm.minnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
   ret <8 x bfloat> %op
 }
@@ -22437,6 +23818,20 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v4, v4, v13, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v8
+; GFX1250-NEXT:    v_pk_min_num_bf16 v1, v1, v9
+; GFX1250-NEXT:    v_pk_min_num_bf16 v2, v2, v10
+; GFX1250-NEXT:    v_pk_min_num_bf16 v3, v3, v11
+; GFX1250-NEXT:    v_pk_min_num_bf16 v4, v4, v12
+; GFX1250-NEXT:    v_pk_min_num_bf16 v5, v5, v13
+; GFX1250-NEXT:    v_pk_min_num_bf16 v6, v6, v14
+; GFX1250-NEXT:    v_pk_min_num_bf16 v7, v7, v15
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <16 x bfloat> @llvm.minnum.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b)
   ret <16 x bfloat> %op
 }
@@ -24529,6 +25924,30 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_minnum_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    v_pk_min_num_bf16 v0, v0, v16
+; GFX1250-NEXT:    v_pk_min_num_bf16 v1, v1, v17
+; GFX1250-NEXT:    v_pk_min_num_bf16 v2, v2, v18
+; GFX1250-NEXT:    v_pk_min_num_bf16 v3, v3, v19
+; GFX1250-NEXT:    v_pk_min_num_bf16 v4, v4, v20
+; GFX1250-NEXT:    v_pk_min_num_bf16 v5, v5, v21
+; GFX1250-NEXT:    v_pk_min_num_bf16 v6, v6, v22
+; GFX1250-NEXT:    v_pk_min_num_bf16 v7, v7, v23
+; GFX1250-NEXT:    v_pk_min_num_bf16 v8, v8, v24
+; GFX1250-NEXT:    v_pk_min_num_bf16 v9, v9, v25
+; GFX1250-NEXT:    v_pk_min_num_bf16 v10, v10, v26
+; GFX1250-NEXT:    v_pk_min_num_bf16 v11, v11, v27
+; GFX1250-NEXT:    v_pk_min_num_bf16 v12, v12, v28
+; GFX1250-NEXT:    v_pk_min_num_bf16 v13, v13, v29
+; GFX1250-NEXT:    v_pk_min_num_bf16 v14, v14, v30
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_pk_min_num_bf16 v15, v15, v31
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <32 x bfloat> @llvm.minnum.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b)
   ret <32 x bfloat> %op
 }
@@ -24653,6 +26072,16 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_max_num_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.maxnum.bf16(bfloat %a, bfloat %b)
   ret bfloat %op
 }
@@ -24826,6 +26255,13 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
   ret <2 x bfloat> %op
 }
@@ -25060,6 +26496,14 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_max_num_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b)
   ret <3 x bfloat> %op
 }
@@ -25350,6 +26794,14 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v4, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v2
+; GFX1250-NEXT:    v_pk_max_num_bf16 v1, v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <4 x bfloat> @llvm.maxnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b)
   ret <4 x bfloat> %op
 }
@@ -25888,6 +27340,16 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v3, v3, v8, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v4
+; GFX1250-NEXT:    v_pk_max_num_bf16 v1, v1, v5
+; GFX1250-NEXT:    v_pk_max_num_bf16 v2, v2, v6
+; GFX1250-NEXT:    v_pk_max_num_bf16 v3, v3, v7
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <8 x bfloat> @llvm.maxnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
   ret <8 x bfloat> %op
 }
@@ -26918,6 +28380,20 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v4, v4, v13, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v8
+; GFX1250-NEXT:    v_pk_max_num_bf16 v1, v1, v9
+; GFX1250-NEXT:    v_pk_max_num_bf16 v2, v2, v10
+; GFX1250-NEXT:    v_pk_max_num_bf16 v3, v3, v11
+; GFX1250-NEXT:    v_pk_max_num_bf16 v4, v4, v12
+; GFX1250-NEXT:    v_pk_max_num_bf16 v5, v5, v13
+; GFX1250-NEXT:    v_pk_max_num_bf16 v6, v6, v14
+; GFX1250-NEXT:    v_pk_max_num_bf16 v7, v7, v15
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <16 x bfloat> @llvm.maxnum.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b)
   ret <16 x bfloat> %op
 }
@@ -29010,6 +30486,30 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v15, v15, v17, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_maxnum_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    v_pk_max_num_bf16 v0, v0, v16
+; GFX1250-NEXT:    v_pk_max_num_bf16 v1, v1, v17
+; GFX1250-NEXT:    v_pk_max_num_bf16 v2, v2, v18
+; GFX1250-NEXT:    v_pk_max_num_bf16 v3, v3, v19
+; GFX1250-NEXT:    v_pk_max_num_bf16 v4, v4, v20
+; GFX1250-NEXT:    v_pk_max_num_bf16 v5, v5, v21
+; GFX1250-NEXT:    v_pk_max_num_bf16 v6, v6, v22
+; GFX1250-NEXT:    v_pk_max_num_bf16 v7, v7, v23
+; GFX1250-NEXT:    v_pk_max_num_bf16 v8, v8, v24
+; GFX1250-NEXT:    v_pk_max_num_bf16 v9, v9, v25
+; GFX1250-NEXT:    v_pk_max_num_bf16 v10, v10, v26
+; GFX1250-NEXT:    v_pk_max_num_bf16 v11, v11, v27
+; GFX1250-NEXT:    v_pk_max_num_bf16 v12, v12, v28
+; GFX1250-NEXT:    v_pk_max_num_bf16 v13, v13, v29
+; GFX1250-NEXT:    v_pk_max_num_bf16 v14, v14, v30
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_pk_max_num_bf16 v15, v15, v31
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <32 x bfloat> @llvm.maxnum.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b)
   ret <32 x bfloat> %op
 }
@@ -29263,6 +30763,13 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sqrt_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_sqrt_bf16_e32 v0, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.sqrt.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -29369,6 +30876,16 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_ldexp_bf16_i32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.ldexp.bf16.i32(bfloat %a, i32 %b)
   ret bfloat %op
 }
@@ -29487,6 +31004,17 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_frexp_bf16_i16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_frexp_mant_f32_e32 v0, v1
+; GFX1250-NEXT:    v_frexp_exp_i32_f32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call { bfloat, i16 } @llvm.frexp.bf16.i16(bfloat %a)
   ret { bfloat, i16 } %op
 }
@@ -29725,6 +31253,32 @@ define bfloat @v_log_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_log_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
+; GFX1250-NEXT:    v_cndmask_b32_e64 v1, 0, 32, vcc_lo
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
+; GFX1250-NEXT:    v_log_f32_e32 v0, v0
+; GFX1250-NEXT:    v_nop
+; GFX1250-NEXT:    v_mul_f32_e32 v1, 0x3f317217, v0
+; GFX1250-NEXT:    v_cmp_gt_f32_e64 s0, 0x7f800000, |v0|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_fma_f32 v2, 0x3f317217, v0, -v1
+; GFX1250-NEXT:    v_fmamk_f32 v2, v0, 0x3377d1cf, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s0
+; GFX1250-NEXT:    v_cndmask_b32_e64 v1, 0, 0x41b17218, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.log.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -29884,6 +31438,13 @@ define bfloat @v_log2_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_log2_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_log_bf16_e32 v0, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.log2.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30117,6 +31678,32 @@ define bfloat @v_log10_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_log10_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
+; GFX1250-NEXT:    v_cndmask_b32_e64 v1, 0, 32, vcc_lo
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
+; GFX1250-NEXT:    v_log_f32_e32 v0, v0
+; GFX1250-NEXT:    v_nop
+; GFX1250-NEXT:    v_mul_f32_e32 v1, 0x3e9a209a, v0
+; GFX1250-NEXT:    v_cmp_gt_f32_e64 s0, 0x7f800000, |v0|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_fma_f32 v2, 0x3e9a209a, v0, -v1
+; GFX1250-NEXT:    v_fmamk_f32 v2, v0, 0x3284fbcf, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_f32_e32 v1, v1, v2
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s0
+; GFX1250-NEXT:    v_cndmask_b32_e64 v1, 0, 0x411a209b, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_f32_e32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.log10.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30358,6 +31945,35 @@ define bfloat @v_exp_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_exp_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    s_mov_b32 s0, 0x3fb8aa3b
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_mul_f32_e32 v2, 0x3fb8aa3b, v1
+; GFX1250-NEXT:    v_rndne_f32_e32 v3, v2
+; GFX1250-NEXT:    v_fma_mix_f32_bf16 v4, v0, s0, -v2 op_sel_hi:[1,0,0]
+; GFX1250-NEXT:    s_mov_b32 s0, 0x32a5705f
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_sub_f32_e32 v2, v2, v3
+; GFX1250-NEXT:    v_fma_mix_f32_bf16 v0, v0, s0, v4 op_sel_hi:[1,0,0]
+; GFX1250-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_add_f32_e32 v0, v2, v0
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v3
+; GFX1250-NEXT:    v_exp_f32_e32 v0, v0
+; GFX1250-NEXT:    v_nop
+; GFX1250-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v2
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX1250-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, 0x7f800000, v0, vcc_lo
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.exp.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30521,6 +32137,13 @@ define bfloat @v_exp2_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_exp2_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_exp_bf16_e32 v0, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.exp2.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30758,6 +32381,35 @@ define bfloat @v_exp10_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_exp10_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    s_mov_b32 s0, 0x40549a78
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_mul_f32_e32 v2, 0x40549a78, v1
+; GFX1250-NEXT:    v_rndne_f32_e32 v3, v2
+; GFX1250-NEXT:    v_fma_mix_f32_bf16 v4, v0, s0, -v2 op_sel_hi:[1,0,0]
+; GFX1250-NEXT:    s_mov_b32 s0, 0x33979a37
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_sub_f32_e32 v2, v2, v3
+; GFX1250-NEXT:    v_fma_mix_f32_bf16 v0, v0, s0, v4 op_sel_hi:[1,0,0]
+; GFX1250-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, 0xc23369f4, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_add_f32_e32 v0, v2, v0
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v3
+; GFX1250-NEXT:    v_exp_f32_e32 v0, v0
+; GFX1250-NEXT:    v_nop
+; GFX1250-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v2
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX1250-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, 0x421a209b, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, 0x7f800000, v0, vcc_lo
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.exp10.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30864,6 +32516,16 @@ define bfloat @v_ceil_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_ceil_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_ceil_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.ceil.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -30970,6 +32632,16 @@ define bfloat @v_trunc_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_trunc_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_trunc_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.trunc.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31076,6 +32748,16 @@ define bfloat @v_rint_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_rint_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_rndne_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.rint.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31182,6 +32864,16 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_nearbyint_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_rndne_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.nearbyint.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31338,6 +33030,24 @@ define bfloat @v_round_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_round_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_trunc_f32_e32 v1, v0
+; GFX1250-NEXT:    v_sub_f32_e32 v2, v0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_ge_f32_e64 s0, |v2|, 0.5
+; GFX1250-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_bfi_b32 v0, 0x7fffffff, v2, v0
+; GFX1250-NEXT:    v_add_f32_e32 v0, v1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.round.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31444,6 +33154,16 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_roundeven_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_rndne_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.roundeven.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31550,6 +33270,16 @@ define bfloat @v_floor_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_floor_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_floor_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.floor.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31654,6 +33384,16 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_canonicalize_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_max_num_f32_e32 v0, v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.canonicalize.bf16(bfloat %a)
   ret bfloat %op
 }
@@ -31702,6 +33442,13 @@ define i1 @v_fcmp_false_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_false_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp false bfloat %a, %b
   ret i1 %op
 }
@@ -31787,6 +33534,16 @@ define i1 @v_fcmp_oeq_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_eq_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_oeq_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp oeq bfloat %a, %b
   ret i1 %op
 }
@@ -31872,6 +33629,16 @@ define i1 @v_fcmp_ogt_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ogt_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_gt_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ogt bfloat %a, %b
   ret i1 %op
 }
@@ -31957,6 +33724,16 @@ define i1 @v_fcmp_oge_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_oge_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp oge bfloat %a, %b
   ret i1 %op
 }
@@ -32042,6 +33819,16 @@ define i1 @v_fcmp_olt_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_olt_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp olt bfloat %a, %b
   ret i1 %op
 }
@@ -32127,6 +33914,16 @@ define i1 @v_fcmp_ole_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ole_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ole bfloat %a, %b
   ret i1 %op
 }
@@ -32212,6 +34009,16 @@ define i1 @v_fcmp_one_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_lg_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_one_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_lg_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp one bfloat %a, %b
   ret i1 %op
 }
@@ -32297,6 +34104,16 @@ define i1 @v_fcmp_uno_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_uno_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp uno bfloat %a, %b
   ret i1 %op
 }
@@ -32382,6 +34199,16 @@ define i1 @v_fcmp_ueq_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_nlg_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ueq_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_nlg_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ueq bfloat %a, %b
   ret i1 %op
 }
@@ -32467,6 +34294,16 @@ define i1 @v_fcmp_ugt_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_nle_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ugt_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_nle_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ugt bfloat %a, %b
   ret i1 %op
 }
@@ -32552,6 +34389,16 @@ define i1 @v_fcmp_uge_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_uge_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp uge bfloat %a, %b
   ret i1 %op
 }
@@ -32637,6 +34484,16 @@ define i1 @v_fcmp_ult_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ult_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ult bfloat %a, %b
   ret i1 %op
 }
@@ -32722,6 +34579,16 @@ define i1 @v_fcmp_ule_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_ule_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp ule bfloat %a, %b
   ret i1 %op
 }
@@ -32807,6 +34674,16 @@ define i1 @v_fcmp_une_bf16(bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_neq_f32_e32 vcc_lo, v0, v1
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_une_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_neq_f32_e32 vcc_lo, v0, v1
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp une bfloat %a, %b
   ret i1 %op
 }
@@ -32847,6 +34724,13 @@ define i1 @v_fcmp_true_bf16(bfloat %a, bfloat %b) {
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fcmp_true_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_mov_b32_e32 v0, 1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fcmp true bfloat %a, %b
   ret i1 %op
 }
@@ -32905,6 +34789,15 @@ define i16 @v_fptosi_bf16_to_i16(bfloat %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_cvt_i32_f32_e32 v0, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_bf16_to_i16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi bfloat %x to i16
   ret i16 %op
 }
@@ -33005,6 +34898,19 @@ define <2 x i16> @v_fptosi_v2bf16_to_v2i16(<2 x bfloat> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v2bf16_to_v2i16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_perm_b32 v0, v0, v1, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <2 x bfloat> %x to <2 x i16>
   ret <2 x i16> %op
 }
@@ -33125,6 +35031,20 @@ define <3 x i16> @v_fptosi_v3bf16_to_v3i16(<3 x bfloat> %x) {
 ; GFX11FAKE16-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v2, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v3bf16_to_v3i16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GFX1250-NEXT:    v_perm_b32 v0, v0, v2, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <3 x bfloat> %x to <3 x i16>
   ret <3 x i16> %op
 }
@@ -33277,6 +35197,24 @@ define <4 x i16> @v_fptosi_v4bf16_to_v4i16(<4 x bfloat> %x) {
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v3, 0x5040100
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v2, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v4bf16_to_v4i16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v1 :: v_dual_lshlrev_b32 v3, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_perm_b32 v0, v0, v3, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v1, v1, v2, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <4 x bfloat> %x to <4 x i16>
   ret <4 x i16> %op
 }
@@ -33335,6 +35273,15 @@ define i32 @v_fptosi_bf16_to_i32(bfloat %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_cvt_i32_f32_e32 v0, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_bf16_to_i32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi bfloat %x to i32
   ret i32 %op
 }
@@ -33400,6 +35347,17 @@ define <2 x i32> @v_fptosi_v2bf16_to_v2i32(<2 x bfloat> %x) {
 ; GFX11-NEXT:    v_cvt_i32_f32_e32 v0, v1
 ; GFX11-NEXT:    v_cvt_i32_f32_e32 v1, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v2bf16_to_v2i32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v2, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v1
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <2 x bfloat> %x to <2 x i32>
   ret <2 x i32> %op
 }
@@ -33482,6 +35440,19 @@ define <3 x i32> @v_fptosi_v3bf16_to_v3i32(<3 x bfloat> %x) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11-NEXT:    v_cvt_i32_f32_e32 v2, v4
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v3bf16_to_v3i32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v4, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v2
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <3 x bfloat> %x to <3 x i32>
   ret <3 x i32> %op
 }
@@ -33578,6 +35549,21 @@ define <4 x i32> @v_fptosi_v4bf16_to_v4i32(<4 x bfloat> %x) {
 ; GFX11-NEXT:    v_cvt_i32_f32_e32 v2, v4
 ; GFX11-NEXT:    v_cvt_i32_f32_e32 v3, v5
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v4bf16_to_v4i32:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v4, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v3, 0xffff0000, v0
+; GFX1250-NEXT:    v_and_b32_e32 v5, 0xffff0000, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v0, v2
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v2, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v1, v3
+; GFX1250-NEXT:    v_cvt_i32_f32_e32 v3, v5
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <4 x bfloat> %x to <4 x i32>
   ret <4 x i32> %op
 }
@@ -33742,6 +35728,27 @@ define i64 @v_fptosi_bf16_to_i64(bfloat %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_bf16_to_i64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_trunc_f32_e32 v0, v0
+; GFX1250-NEXT:    v_mul_f32_e64 v1, 0x2f800000, |v0|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_floor_f32_e32 v1, v1
+; GFX1250-NEXT:    v_fma_f32 v2, 0xcf800000, v1, |v0|
+; GFX1250-NEXT:    v_ashrrev_i32_e32 v0, 31, v0
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v3, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GFX1250-NEXT:    v_dual_mov_b32 v1, v0 :: v_dual_bitop2_b32 v3, v3, v0 bitop3:0x14
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_xor_b32_e32 v2, v2, v0
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[0:1], v[2:3], v[0:1]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi bfloat %x to i64
   ret i64 %op
 }
@@ -33973,6 +35980,42 @@ define <2 x i64> @v_fptosi_v2bf16_to_v2i64(<2 x bfloat> %x) {
 ; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v4, v6
 ; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v3, v6, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v2bf16_to_v2i64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_trunc_f32_e32 v3, v0
+; GFX1250-NEXT:    v_mul_f32_e64 v2, 0x2f800000, |v3|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_floor_f32_e32 v5, v2
+; GFX1250-NEXT:    v_ashrrev_i32_e32 v2, 31, v3
+; GFX1250-NEXT:    v_trunc_f32_e32 v1, v1
+; GFX1250-NEXT:    v_fma_f32 v3, 0xcf800000, v5, |v3|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v7, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_mul_f32_e64 v0, 0x2f800000, |v1|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v8, v3
+; GFX1250-NEXT:    v_mov_b32_e32 v3, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_floor_f32_e32 v4, v0
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v0, 31, v1 :: v_dual_bitop2_b32 v7, v7, v2 bitop3:0x14
+; GFX1250-NEXT:    v_fma_f32 v6, 0xcf800000, v4, |v1|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v4, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GFX1250-NEXT:    v_mov_b32_e32 v1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_xor_b32_e32 v5, v4, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v4, v6, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v6, v8, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[0:1], v[4:5], v[0:1]
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], v[6:7], v[2:3]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <2 x bfloat> %x to <2 x i64>
   ret <2 x i64> %op
 }
@@ -34293,6 +36336,52 @@ define <3 x i64> @v_fptosi_v3bf16_to_v3i64(<3 x bfloat> %x) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_ci_u32_e64 v5, null, v6, v8, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v3bf16_to_v3i64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_trunc_f32_e32 v6, v2
+; GFX1250-NEXT:    v_trunc_f32_e32 v8, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_trunc_f32_e32 v7, v0
+; GFX1250-NEXT:    v_mul_f32_e64 v1, 0x2f800000, |v6|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_mul_f32_e64 v5, 0x2f800000, |v8|
+; GFX1250-NEXT:    v_mul_f32_e64 v3, 0x2f800000, |v7|
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v0, 31, v6 :: v_dual_ashrrev_i32 v2, 31, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_floor_f32_e32 v9, v1
+; GFX1250-NEXT:    v_floor_f32_e32 v11, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_floor_f32_e32 v10, v3
+; GFX1250-NEXT:    v_dual_mov_b32 v1, v0 :: v_dual_ashrrev_i32 v4, 31, v8
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_fma_f32 v6, 0xcf800000, v9, |v6|
+; GFX1250-NEXT:    v_fma_f32 v8, 0xcf800000, v11, |v8|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX1250-NEXT:    v_fma_f32 v7, 0xcf800000, v10, |v7|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v9, v9
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v10, v10
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v11, v11
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v12, v7
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v13, v8
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT:    v_xor_b32_e32 v7, v9, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v6, v6, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v9, v10, v2
+; GFX1250-NEXT:    v_xor_b32_e32 v8, v12, v2
+; GFX1250-NEXT:    v_xor_b32_e32 v11, v11, v4
+; GFX1250-NEXT:    v_xor_b32_e32 v10, v13, v4
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[0:1], v[6:7], v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], v[8:9], v[2:3]
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[4:5], v[10:11], v[4:5]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <3 x bfloat> %x to <3 x i64>
   ret <3 x i64> %op
 }
@@ -34698,6 +36787,61 @@ define <4 x i64> @v_fptosi_v4bf16_to_v4i64(<4 x bfloat> %x) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_ci_u32_e64 v7, null, v7, v13, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fptosi_v4bf16_to_v4i64:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshlrev_b32 v2, 16, v0 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_trunc_f32_e32 v7, v2
+; GFX1250-NEXT:    v_trunc_f32_e32 v9, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_trunc_f32_e32 v8, v0
+; GFX1250-NEXT:    v_trunc_f32_e32 v10, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_mul_f32_e64 v1, 0x2f800000, |v7|
+; GFX1250-NEXT:    v_mul_f32_e64 v5, 0x2f800000, |v9|
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_mul_f32_e64 v3, 0x2f800000, |v8|
+; GFX1250-NEXT:    v_mul_f32_e64 v11, 0x2f800000, |v10|
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v0, 31, v7 :: v_dual_ashrrev_i32 v2, 31, v8
+; GFX1250-NEXT:    v_floor_f32_e32 v12, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX1250-NEXT:    v_floor_f32_e32 v13, v3
+; GFX1250-NEXT:    v_floor_f32_e32 v14, v5
+; GFX1250-NEXT:    v_floor_f32_e32 v11, v11
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v4, 31, v9 :: v_dual_ashrrev_i32 v6, 31, v10
+; GFX1250-NEXT:    v_fma_f32 v7, 0xcf800000, v12, |v7|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v12, v12
+; GFX1250-NEXT:    v_fma_f32 v8, 0xcf800000, v13, |v8|
+; GFX1250-NEXT:    v_fma_f32 v15, 0xcf800000, v14, |v9|
+; GFX1250-NEXT:    v_fma_f32 v16, 0xcf800000, v11, |v10|
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v7, v7
+; GFX1250-NEXT:    v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT:    v_mov_b32_e32 v5, v4
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v13, v13
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v14, v14
+; GFX1250-NEXT:    v_xor_b32_e32 v9, v12, v0
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v10, v8
+; GFX1250-NEXT:    v_xor_b32_e32 v8, v7, v0
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v12, v11
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v7, v15
+; GFX1250-NEXT:    v_cvt_u32_f32_e32 v15, v16
+; GFX1250-NEXT:    v_xor_b32_e32 v11, v13, v2
+; GFX1250-NEXT:    v_xor_b32_e32 v10, v10, v2
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[0:1], v[8:9], v[0:1]
+; GFX1250-NEXT:    v_xor_b32_e32 v9, v14, v4
+; GFX1250-NEXT:    v_dual_mov_b32 v7, v6 :: v_dual_bitop2_b32 v8, v7, v4 bitop3:0x14
+; GFX1250-NEXT:    v_xor_b32_e32 v13, v12, v6
+; GFX1250-NEXT:    v_xor_b32_e32 v12, v15, v6
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], v[10:11], v[2:3]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[4:5], v[8:9], v[4:5]
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[6:7], v[12:13], v[6:7]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = fptosi <4 x bfloat> %x to <4 x i64>
   ret <4 x i64> %op
 }
@@ -34795,6 +36939,16 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_i16_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp i16 %x to bfloat
   ret bfloat %op
 }
@@ -34936,6 +37090,19 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v2i16_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_ashrrev_i32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <2 x i16> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -35125,6 +37292,23 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v3i16_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_ashrrev_i32_e32 v2, 16, v0
+; GFX1250-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <3 x i16> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -35355,6 +37539,24 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v4i16_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v2, 16, v1 :: v_dual_ashrrev_i32 v3, 16, v0
+; GFX1250-NEXT:    v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT:    v_bfe_i32 v1, v1, 0, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <4 x i16> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -35446,6 +37648,15 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_i32_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp i32 %x to bfloat
   ret bfloat %op
 }
@@ -35577,6 +37788,16 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v2i32_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <2 x i32> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -35750,6 +37971,18 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v4, v6, vcc_lo
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v2, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v3i32_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v2, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <3 x i32> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -35959,6 +38192,19 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v6, v4, vcc_lo
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v3, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v4i32_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v2, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <4 x i32> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -36148,6 +38394,28 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_i64_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_xor_b32_e32 v2, v0, v1
+; GFX1250-NEXT:    v_cls_i32_e32 v3, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_ashrrev_i32_e32 v2, 31, v2
+; GFX1250-NEXT:    v_add_nc_u32_e32 v2, 32, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_min_u32_e64 v2, v3, -1, v2
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v2, v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v1, 32, v2 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp i64 %x to bfloat
   ret bfloat %op
 }
@@ -36474,6 +38742,40 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v2i64_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_xor_b32_e32 v5, v0, v1
+; GFX1250-NEXT:    v_xor_b32_e32 v4, v2, v3
+; GFX1250-NEXT:    v_cls_i32_e32 v6, v3
+; GFX1250-NEXT:    v_cls_i32_e32 v7, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v5, 31, v5 :: v_dual_ashrrev_i32 v4, 31, v4
+; GFX1250-NEXT:    v_dual_add_nc_u32 v5, 32, v5 :: v_dual_add_nc_u32 v4, 32, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_add_min_u32_e64 v5, v7, -1, v5
+; GFX1250-NEXT:    v_add_min_u32_e64 v4, v6, -1, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v5, v[0:1]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v4, v[2:3]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v1, 32, v4 :: v_dual_bitop2_b32 v2, v3, v2 bitop3:0x54
+; GFX1250-NEXT:    v_sub_nc_u32_e32 v3, 32, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v3
+; GFX1250-NEXT:    v_ldexp_f32 v1, v2, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <2 x i64> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -36929,6 +39231,53 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v3i64_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_xor_b32_e32 v8, v4, v5
+; GFX1250-NEXT:    v_xor_b32_e32 v6, v2, v3
+; GFX1250-NEXT:    v_cls_i32_e32 v10, v3
+; GFX1250-NEXT:    v_cls_i32_e32 v9, v5
+; GFX1250-NEXT:    v_cls_i32_e32 v11, v1
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v8, 31, v8 :: v_dual_bitop2_b32 v7, v0, v1 bitop3:0x14
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v6, 31, v6 :: v_dual_ashrrev_i32 v7, 31, v7
+; GFX1250-NEXT:    v_dual_add_nc_u32 v6, 32, v6 :: v_dual_add_nc_u32 v7, 32, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_add_min_u32_e64 v6, v10, -1, v6
+; GFX1250-NEXT:    v_add_min_u32_e64 v7, v11, -1, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v6, v[2:3]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v7, v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    v_add_nc_u32_e32 v8, 32, v8
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX1250-NEXT:    v_add_min_u32_e64 v8, v9, -1, v8
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v3, 32, v6 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[4:5], v8, v[4:5]
+; GFX1250-NEXT:    v_sub_nc_u32_e32 v8, 32, v8
+; GFX1250-NEXT:    v_ldexp_f32 v2, v2, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_min_u32_e32 v4, 1, v4
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v4, 32, v7 :: v_dual_bitop2_b32 v1, v5, v4 bitop3:0x54
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_ldexp_f32 v1, v1, v8
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <3 x i64> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -37509,6 +39858,64 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v4, v9, vcc_lo
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v3, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_sitofp_v4i64_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_xor_b32_e32 v9, v4, v5
+; GFX1250-NEXT:    v_xor_b32_e32 v8, v6, v7
+; GFX1250-NEXT:    v_cls_i32_e32 v12, v7
+; GFX1250-NEXT:    v_cls_i32_e32 v13, v5
+; GFX1250-NEXT:    v_cls_i32_e32 v14, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v9, 31, v9 :: v_dual_ashrrev_i32 v8, 31, v8
+; GFX1250-NEXT:    v_xor_b32_e32 v10, v2, v3
+; GFX1250-NEXT:    v_cls_i32_e32 v15, v1
+; GFX1250-NEXT:    v_dual_add_nc_u32 v9, 32, v9 :: v_dual_add_nc_u32 v8, 32, v8
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v10, 31, v10 :: v_dual_bitop2_b32 v11, v0, v1 bitop3:0x14
+; GFX1250-NEXT:    v_add_min_u32_e64 v9, v13, -1, v9
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_add_min_u32_e64 v8, v12, -1, v8
+; GFX1250-NEXT:    v_dual_ashrrev_i32 v11, 31, v11 :: v_dual_add_nc_u32 v10, 32, v10
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[4:5], v9, v[4:5]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[6:7], v8, v[6:7]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_add_nc_u32_e32 v11, 32, v11
+; GFX1250-NEXT:    v_add_min_u32_e64 v10, v14, -1, v10
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_add_min_u32_e64 v11, v15, -1, v11
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v10, v[2:3]
+; GFX1250-NEXT:    v_min_u32_e32 v6, 1, v6
+; GFX1250-NEXT:    v_min_u32_e32 v4, 1, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v11, v[0:1]
+; GFX1250-NEXT:    v_or_b32_e32 v6, v7, v6
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v5, 32, v10 :: v_dual_bitop2_b32 v4, v5, v4 bitop3:0x54
+; GFX1250-NEXT:    v_sub_nc_u32_e32 v7, 32, v9
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v3, 32, v8 :: v_dual_bitop2_b32 v2, v3, v2 bitop3:0x54
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v4, v4
+; GFX1250-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v1, v6
+; GFX1250-NEXT:    v_sub_nc_u32_e32 v6, 32, v11
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GFX1250-NEXT:    v_ldexp_f32 v1, v1, v3
+; GFX1250-NEXT:    v_ldexp_f32 v3, v4, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_ldexp_f32 v2, v2, v5
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v6
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v3, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = sitofp <4 x i64> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -37607,6 +40014,16 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_i16_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp i16 %x to bfloat
   ret bfloat %op
 }
@@ -37749,6 +40166,19 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v2i16_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <2 x i16> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -37942,6 +40372,23 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v4, v6, vcc_lo
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v3i16_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <3 x i16> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -38178,6 +40625,24 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v6, v7, vcc_lo
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v4i16_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v2, 16, v1 :: v_dual_lshrrev_b32 v3, 16, v0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <4 x i16> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -38269,6 +40734,15 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_i32_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp i32 %x to bfloat
   ret bfloat %op
 }
@@ -38400,6 +40874,16 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v2i32_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <2 x i32> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -38573,6 +41057,18 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v4, v6, vcc_lo
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v2, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v3i32_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v2, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <3 x i32> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -38782,6 +41278,19 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v6, v4, vcc_lo
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v3, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v4i32_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v2, v3
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <4 x i32> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -38935,6 +41444,24 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_i64_to_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v2, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_min_u32_e32 v2, 32, v2
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v2, v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v1, 32, v2 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp i64 %x to bfloat
   ret bfloat %op
 }
@@ -39190,6 +41717,35 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v2i64_to_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v4, v3
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v5, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_min_u32_e32 v4, 32, v4
+; GFX1250-NEXT:    v_min_u32_e32 v5, 32, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v4, v[2:3]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v5, v[0:1]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v3, 32, v5 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    v_sub_nc_u32_e32 v1, 32, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_ldexp_f32 v1, v2, v1
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v1
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <2 x i64> %x to <2 x bfloat>
   ret <2 x bfloat> %op
 }
@@ -39548,6 +42104,45 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v3i64_to_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v6, v3
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v7, v1
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v8, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_min_u32_e32 v6, 32, v6
+; GFX1250-NEXT:    v_min_u32_e32 v7, 32, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_min_u32_e32 v8, 32, v8
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v6, v[2:3]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v7, v[0:1]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[4:5], v8, v[4:5]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_min_u32_e32 v4, 1, v4
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v8, 32, v8 :: v_dual_bitop2_b32 v2, v3, v2 bitop3:0x54
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v3, 32, v6 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v4, 32, v7 :: v_dual_bitop2_b32 v1, v5, v4 bitop3:0x54
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    v_ldexp_f32 v2, v2, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v4
+; GFX1250-NEXT:    v_ldexp_f32 v1, v1, v8
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <3 x i64> %x to <3 x bfloat>
   ret <3 x bfloat> %op
 }
@@ -39996,6 +42591,54 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v3, v2, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_uitofp_v4i64_to_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v8, v7
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v9, v3
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v10, v1
+; GFX1250-NEXT:    v_clz_i32_u32_e32 v11, v5
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_min_u32_e32 v8, 32, v8
+; GFX1250-NEXT:    v_min_u32_e32 v9, 32, v9
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_min_u32_e32 v10, 32, v10
+; GFX1250-NEXT:    v_min_u32_e32 v11, 32, v11
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[6:7], v8, v[6:7]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[2:3], v9, v[2:3]
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[0:1], v10, v[0:1]
+; GFX1250-NEXT:    v_lshlrev_b64_e32 v[4:5], v11, v[4:5]
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v8, 32, v8 :: v_dual_sub_nc_u32 v11, 32, v11
+; GFX1250-NEXT:    v_min_u32_e32 v6, 1, v6
+; GFX1250-NEXT:    v_min_u32_e32 v2, 1, v2
+; GFX1250-NEXT:    v_min_u32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_min_u32_e32 v4, 1, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v9, 32, v9 :: v_dual_bitop2_b32 v6, v7, v6 bitop3:0x54
+; GFX1250-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_dual_sub_nc_u32 v3, 32, v10 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
+; GFX1250-NEXT:    v_or_b32_e32 v1, v5, v4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v4, v6
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_ldexp_f32 v4, v4, v8
+; GFX1250-NEXT:    v_ldexp_f32 v2, v2, v9
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_ldexp_f32 v0, v0, v3
+; GFX1250-NEXT:    v_ldexp_f32 v1, v1, v11
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v2
+; GFX1250-NEXT:    v_cvt_pk_bf16_f32 v1, v1, v4
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = uitofp <4 x i64> %x to <4 x bfloat>
   ret <4 x bfloat> %op
 }
@@ -40073,6 +42716,16 @@ define bfloat @v_select_bf16(i1 %cond, bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, bfloat %a, bfloat %b
   ret bfloat %op
 }
@@ -40156,6 +42809,17 @@ define bfloat @v_select_fneg_lhs_bf16(i1 %cond, bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_fneg_lhs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v1, 0x8000, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %neg.a = fneg bfloat %a
   %op = select i1 %cond, bfloat %neg.a, bfloat %b
   ret bfloat %op
@@ -40240,6 +42904,17 @@ define bfloat @v_select_fneg_rhs_bf16(i1 %cond, bfloat %a, bfloat %b) {
 ; GFX11FAKE16-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_fneg_rhs_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    v_xor_b32_e32 v2, 0x8000, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %neg.b = fneg bfloat %b
   %op = select i1 %cond, bfloat %a, bfloat %neg.b
   ret bfloat %op
@@ -40349,6 +43024,19 @@ define <2 x bfloat> @v_select_v2bf16(i1 %cond, <2 x bfloat> %a, <2 x bfloat> %b)
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v3, 16, v1 :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v4, 16, v2 :: v_dual_cndmask_b32 v0, v2, v1, vcc_lo
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <2 x bfloat> %a, <2 x bfloat> %b
   ret <2 x bfloat> %op
 }
@@ -40466,6 +43154,21 @@ define <2 x bfloat> @v_vselect_v2bf16(<2 x i1> %cond, <2 x bfloat> %a, <2 x bflo
 ; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v5, v4, vcc_lo
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_vselect_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v4, 16, v2 :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v5, 16, v3 :: v_dual_bitop2_b32 v1, 1, v1 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v5, v4, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select <2 x i1> %cond, <2 x bfloat> %a, <2 x bfloat> %b
   ret <2 x bfloat> %op
 }
@@ -40552,6 +43255,17 @@ define amdgpu_ps i32 @s_select_bf16(bfloat inreg %a, bfloat inreg %b, i32 %c) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11FAKE16-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_select_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s0
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s1, v1, vcc_lo
+; GFX1250-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq i32 %c, 0
   %op = select i1 %cond, bfloat %a, bfloat %b
   %cast = bitcast bfloat %op to i16
@@ -40687,6 +43401,21 @@ define amdgpu_ps i32 @s_select_v2bf16(<2 x bfloat> inreg %a, <2 x bfloat> inreg
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11FAKE16-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_select_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s3, v1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s1, v2, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v0, v0, v1, 0x5040100
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq i32 %c, 0
   %op = select i1 %cond, <2 x bfloat> %a, <2 x bfloat> %b
   %cast = bitcast <2 x bfloat> %op to i32
@@ -40824,6 +43553,22 @@ define amdgpu_ps i32 @s_vselect_v2bf16(<2 x bfloat> inreg %a, <2 x bfloat> inreg
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11FAKE16-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_vselect_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX1250-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s0
+; GFX1250-NEXT:    s_lshr_b32 s0, s1, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s0, v2, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s1, v3, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq <2 x i32> %c, zeroinitializer
   %op = select <2 x i1> %cond, <2 x bfloat> %a, <2 x bfloat> %b
   %cast = bitcast <2 x bfloat> %op to i32
@@ -40925,6 +43670,16 @@ define <3 x bfloat> @v_select_v3bf16(i1 %cond, <3 x bfloat> %a, <3 x bfloat> %b)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <3 x bfloat> %a, <3 x bfloat> %b
   ret <3 x bfloat> %op
 }
@@ -41033,6 +43788,16 @@ define <4 x bfloat> @v_select_v4bf16(i1 %cond, <4 x bfloat> %a, <4 x bfloat> %b)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <4 x bfloat> %a, <4 x bfloat> %b
   ret <4 x bfloat> %op
 }
@@ -41168,6 +43933,17 @@ define <6 x bfloat> @v_select_v6bf16(i1 %cond, <6 x bfloat> %a, <6 x bfloat> %b)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v4, v1 :: v_dual_cndmask_b32 v1, v5, v2
 ; GFX11-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v6bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v4, v1 :: v_dual_cndmask_b32 v1, v5, v2
+; GFX1250-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc_lo
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <6 x bfloat> %a, <6 x bfloat> %b
   ret <6 x bfloat> %op
 }
@@ -41329,6 +44105,17 @@ define <8 x bfloat> @v_select_v8bf16(i1 %cond, <8 x bfloat> %a, <8 x bfloat> %b)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v5, v1 :: v_dual_cndmask_b32 v1, v6, v2
 ; GFX11-NEXT:    v_dual_cndmask_b32 v2, v7, v3 :: v_dual_cndmask_b32 v3, v8, v4
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v5, v1 :: v_dual_cndmask_b32 v1, v6, v2
+; GFX1250-NEXT:    v_dual_cndmask_b32 v2, v7, v3 :: v_dual_cndmask_b32 v3, v8, v4
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <8 x bfloat> %a, <8 x bfloat> %b
   ret <8 x bfloat> %op
 }
@@ -41604,6 +44391,19 @@ define <16 x bfloat> @v_select_v16bf16(i1 %cond, <16 x bfloat> %a, <16 x bfloat>
 ; GFX11-NEXT:    v_dual_cndmask_b32 v4, v13, v5 :: v_dual_cndmask_b32 v5, v14, v6
 ; GFX11-NEXT:    v_dual_cndmask_b32 v6, v15, v7 :: v_dual_cndmask_b32 v7, v16, v8
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v9, v1 :: v_dual_cndmask_b32 v1, v10, v2
+; GFX1250-NEXT:    v_dual_cndmask_b32 v2, v11, v3 :: v_dual_cndmask_b32 v3, v12, v4
+; GFX1250-NEXT:    v_dual_cndmask_b32 v4, v13, v5 :: v_dual_cndmask_b32 v5, v14, v6
+; GFX1250-NEXT:    v_dual_cndmask_b32 v6, v15, v7 :: v_dual_cndmask_b32 v7, v16, v8
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <16 x bfloat> %a, <16 x bfloat> %b
   ret <16 x bfloat> %op
 }
@@ -42234,6 +45034,27 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_dual_cndmask_b32 v14, v31, v15 :: v_dual_cndmask_b32 v15, v32, v16
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_select_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    scratch_load_b32 v32, off, s32 offset:4
+; GFX1250-NEXT:    v_and_b32_e32 v0, 1, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v17, v1 :: v_dual_cndmask_b32 v1, v18, v2
+; GFX1250-NEXT:    v_dual_cndmask_b32 v2, v19, v3 :: v_dual_cndmask_b32 v3, v20, v4
+; GFX1250-NEXT:    v_dual_cndmask_b32 v4, v21, v5 :: v_dual_cndmask_b32 v5, v22, v6
+; GFX1250-NEXT:    v_dual_cndmask_b32 v6, v23, v7 :: v_dual_cndmask_b32 v7, v24, v8
+; GFX1250-NEXT:    v_dual_cndmask_b32 v8, v25, v9 :: v_dual_cndmask_b32 v9, v26, v10
+; GFX1250-NEXT:    v_dual_cndmask_b32 v10, v27, v11 :: v_dual_cndmask_b32 v11, v28, v12
+; GFX1250-NEXT:    v_dual_cndmask_b32 v12, v29, v13 :: v_dual_cndmask_b32 v13, v30, v14
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v14, v31, v15 :: v_dual_cndmask_b32 v15, v32, v16
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select i1 %cond, <32 x bfloat> %a, <32 x bfloat> %b
   ret <32 x bfloat> %op
 }
@@ -42349,6 +45170,20 @@ define amdgpu_ps <2 x i32> @s_select_v3bf16(<3 x bfloat> inreg %a, <3 x bfloat>
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_select_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s2, v1, vcc_lo
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s3, v2, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX1250-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq i32 %c, 0
   %op = select i1 %cond, <3 x bfloat> %a, <3 x bfloat> %b
   %cast = bitcast <3 x bfloat> %op to i48
@@ -42475,6 +45310,18 @@ define amdgpu_ps <2 x i32> @s_select_v4bf16(<4 x bfloat> inreg %a, <4 x bfloat>
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
 ; GFX11-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_select_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s3, v1, vcc_lo
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s2, v2, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq i32 %c, 0
   %op = select i1 %cond, <4 x bfloat> %a, <4 x bfloat> %b
   %cast = bitcast <4 x bfloat> %op to <2 x i32>
@@ -42709,6 +45556,33 @@ define amdgpu_ps <2 x i32> @s_vselect_v4bf16(<4 x bfloat> inreg %a, <4 x bfloat>
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11FAKE16-NEXT:    ; return to shader part epilog
+;
+; GFX1250-LABEL: s_vselect_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_lshr_b32 s4, s1, 16
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v3
+; GFX1250-NEXT:    v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s1
+; GFX1250-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX1250-NEXT:    s_lshr_b32 s5, s0, 16
+; GFX1250-NEXT:    v_mov_b32_e32 v6, s0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v3, s4, v4, vcc_lo
+; GFX1250-NEXT:    v_mov_b32_e32 v4, s5
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX1250-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s0, v4, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s2, v6, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    v_cndmask_b32_e32 v2, s3, v5, vcc_lo
+; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
+; GFX1250-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX1250-NEXT:    ; return to shader part epilog
   %cond = icmp eq <4 x i32> %c, zeroinitializer
   %op = select <4 x i1> %cond, <4 x bfloat> %a, <4 x bfloat> %b
   %cast = bitcast <4 x bfloat> %op to <2 x i32>
@@ -42912,6 +45786,28 @@ define <4 x bfloat> @v_vselect_v4bf16(<4 x i1> %cond, <4 x bfloat> %a, <4 x bflo
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_vselect_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v2, 1, v2
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v8, 16, v4 :: v_dual_bitop2_b32 v1, 1, v1 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v9, 16, v6 :: v_dual_bitop2_b32 v3, 1, v3 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX1250-NEXT:    v_dual_cndmask_b32 v2, v7, v5, vcc_lo :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v7, 16, v7 :: v_dual_lshrrev_b32 v5, 16, v5
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v6, v4, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v9, v8, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
+; GFX1250-NEXT:    v_cndmask_b32_e32 v3, v7, v5, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select <4 x i1> %cond, <4 x bfloat> %a, <4 x bfloat> %b
   ret <4 x bfloat> %op
 }
@@ -43264,6 +46160,46 @@ define <8 x bfloat> @v_vselect_v8bf16(<8 x i1> %cond, <8 x bfloat> %a, <8 x bflo
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v3, v7, v6, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_vselect_v8bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_and_b32_e32 v6, 1, v6
+; GFX1250-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v17, 16, v14 :: v_dual_bitop2_b32 v5, 1, v5 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v16, 16, v10 :: v_dual_bitop2_b32 v3, 1, v3 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
+; GFX1250-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX1250-NEXT:    v_dual_cndmask_b32 v6, v15, v11, vcc_lo :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
+; GFX1250-NEXT:    v_and_b32_e32 v7, 1, v7
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v11, 16, v11
+; GFX1250-NEXT:    v_dual_cndmask_b32 v4, v14, v10 :: v_dual_lshrrev_b32 v15, 16, v15
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v5
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v14, 16, v12 :: v_dual_bitop2_b32 v2, 1, v2 bitop3:0x40
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v10, 16, v8
+; GFX1250-NEXT:    v_cndmask_b32_e32 v5, v17, v16, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX1250-NEXT:    v_cndmask_b32_e32 v2, v13, v9, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
+; GFX1250-NEXT:    v_dual_cndmask_b32 v0, v12, v8 :: v_dual_lshrrev_b32 v13, 16, v13
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v14, v10, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    v_cndmask_b32_e32 v3, v13, v9, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX1250-NEXT:    v_cndmask_b32_e32 v7, v15, v11, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v2, v5, v4, 0x5040100
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_perm_b32 v3, v7, v6, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select <8 x i1> %cond, <8 x bfloat> %a, <8 x bfloat> %b
   ret <8 x bfloat> %op
 }
@@ -44002,6 +46938,74 @@ define <16 x bfloat> @v_vselect_v16bf16(<16 x i1> %cond, <16 x bfloat> %a, <16 x
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v7, v15, v14, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_vselect_v16bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v52, 16, v25 :: v_dual_bitop2_b32 v12, 1, v12 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v53, 16, v16 :: v_dual_bitop2_b32 v13, 1, v13 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v33, 16, v22 :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v34, 16, v30 :: v_dual_bitop2_b32 v3, 1, v3 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v51, 16, v17 :: v_dual_bitop2_b32 v10, 1, v10 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v12, v30, v22, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v13
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v50, 16, v26 :: v_dual_bitop2_b32 v11, 1, v11 bitop3:0x40
+; GFX1250-NEXT:    v_and_b32_e32 v14, 1, v14
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v35, 16, v21 :: v_dual_bitop2_b32 v2, 1, v2 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v13, v34, v33, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v10
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v36, 16, v29 :: v_dual_bitop2_b32 v4, 1, v4 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v49, 16, v18 :: v_dual_bitop2_b32 v8, 1, v8 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v10, v29, v21, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v11
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v37, 16, v20 :: v_dual_bitop2_b32 v5, 1, v5 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v38, 16, v28 :: v_dual_bitop2_b32 v7, 1, v7 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v48, 16, v27 :: v_dual_bitop2_b32 v9, 1, v9 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v11, v36, v35, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v39, 16, v19 :: v_dual_bitop2_b32 v6, 1, v6 bitop3:0x40
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v32, 16, v23 :: v_dual_bitop2_b32 v1, 1, v1 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v8, v28, v20, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v9
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v54, 16, v24 :: v_dual_bitop2_b32 v15, 1, v15 bitop3:0x40
+; GFX1250-NEXT:    v_cndmask_b32_e32 v9, v38, v37, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
+; GFX1250-NEXT:    v_cndmask_b32_e32 v6, v27, v19, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
+; GFX1250-NEXT:    v_cndmask_b32_e32 v4, v26, v18, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX1250-NEXT:    v_cndmask_b32_e32 v2, v25, v17, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
+; GFX1250-NEXT:    v_cndmask_b32_e32 v3, v52, v51, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_cndmask_b32_e32 v0, v24, v16, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v54, v53, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v5
+; GFX1250-NEXT:    v_cndmask_b32_e32 v5, v50, v49, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v2, v5, v4, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v4, v9, v8, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v5, v11, v10, 0x5040100
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v3, 16, v31
+; GFX1250-NEXT:    v_cndmask_b32_e32 v7, v48, v39, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v14
+; GFX1250-NEXT:    v_cndmask_b32_e32 v14, v31, v23, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v15
+; GFX1250-NEXT:    v_cndmask_b32_e32 v15, v3, v32, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v3, v7, v6, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v6, v13, v12, 0x5040100
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_3)
+; GFX1250-NEXT:    v_perm_b32 v7, v15, v14, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select <16 x i1> %cond, <16 x bfloat> %a, <16 x bfloat> %b
   ret <16 x bfloat> %op
 }
@@ -45856,6 +48860,178 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
 ; GFX11FAKE16-NEXT:    v_perm_b32 v14, v29, v28, 0x5040100
 ; GFX11FAKE16-NEXT:    v_perm_b32 v15, v31, v30, 0x5040100
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_vselect_v32bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_clause 0x1b
+; GFX1250-NEXT:    scratch_load_b32 v31, off, s32 offset:60
+; GFX1250-NEXT:    scratch_load_b32 v32, off, s32 offset:124
+; GFX1250-NEXT:    scratch_load_u16 v33, off, s32
+; GFX1250-NEXT:    scratch_load_b32 v34, off, s32 offset:128
+; GFX1250-NEXT:    scratch_load_b32 v35, off, s32 offset:64
+; GFX1250-NEXT:    scratch_load_b32 v36, off, s32 offset:120
+; GFX1250-NEXT:    scratch_load_b32 v37, off, s32 offset:56
+; GFX1250-NEXT:    scratch_load_b32 v38, off, s32 offset:116
+; GFX1250-NEXT:    scratch_load_b32 v39, off, s32 offset:52
+; GFX1250-NEXT:    scratch_load_b32 v48, off, s32 offset:112
+; GFX1250-NEXT:    scratch_load_b32 v49, off, s32 offset:48
+; GFX1250-NEXT:    scratch_load_b32 v50, off, s32 offset:108
+; GFX1250-NEXT:    scratch_load_b32 v51, off, s32 offset:44
+; GFX1250-NEXT:    scratch_load_b32 v52, off, s32 offset:104
+; GFX1250-NEXT:    scratch_load_b32 v53, off, s32 offset:40
+; GFX1250-NEXT:    scratch_load_b32 v54, off, s32 offset:100
+; GFX1250-NEXT:    scratch_load_b32 v55, off, s32 offset:36
+; GFX1250-NEXT:    scratch_load_b32 v64, off, s32 offset:76
+; GFX1250-NEXT:    scratch_load_b32 v65, off, s32 offset:12
+; GFX1250-NEXT:    scratch_load_b32 v66, off, s32 offset:96
+; GFX1250-NEXT:    scratch_load_b32 v67, off, s32 offset:32
+; GFX1250-NEXT:    scratch_load_b32 v68, off, s32 offset:80
+; GFX1250-NEXT:    scratch_load_b32 v69, off, s32 offset:84
+; GFX1250-NEXT:    scratch_load_b32 v70, off, s32 offset:92
+; GFX1250-NEXT:    scratch_load_b32 v71, off, s32 offset:28
+; GFX1250-NEXT:    scratch_load_b32 v80, off, s32 offset:20
+; GFX1250-NEXT:    scratch_load_b32 v81, off, s32 offset:88
+; GFX1250-NEXT:    scratch_load_b32 v82, off, s32 offset:24
+; GFX1250-NEXT:    v_and_b32_e32 v30, 1, v30
+; GFX1250-NEXT:    v_and_b32_e32 v29, 1, v29
+; GFX1250-NEXT:    v_and_b32_e32 v26, 1, v26
+; GFX1250-NEXT:    v_and_b32_e32 v24, 1, v24
+; GFX1250-NEXT:    v_and_b32_e32 v22, 1, v22
+; GFX1250-NEXT:    v_and_b32_e32 v20, 1, v20
+; GFX1250-NEXT:    v_and_b32_e32 v18, 1, v18
+; GFX1250-NEXT:    v_and_b32_e32 v16, 1, v16
+; GFX1250-NEXT:    v_and_b32_e32 v10, 1, v10
+; GFX1250-NEXT:    v_and_b32_e32 v6, 1, v6
+; GFX1250-NEXT:    v_and_b32_e32 v4, 1, v4
+; GFX1250-NEXT:    v_and_b32_e32 v1, 1, v1
+; GFX1250-NEXT:    v_and_b32_e32 v3, 1, v3
+; GFX1250-NEXT:    v_and_b32_e32 v5, 1, v5
+; GFX1250-NEXT:    v_and_b32_e32 v23, 1, v23
+; GFX1250-NEXT:    v_and_b32_e32 v9, 1, v9
+; GFX1250-NEXT:    v_and_b32_e32 v13, 1, v13
+; GFX1250-NEXT:    v_and_b32_e32 v15, 1, v15
+; GFX1250-NEXT:    v_and_b32_e32 v21, 1, v21
+; GFX1250-NEXT:    v_and_b32_e32 v11, 1, v11
+; GFX1250-NEXT:    v_and_b32_e32 v19, 1, v19
+; GFX1250-NEXT:    s_wait_loadcnt 0x1a
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v83, 16, v32 :: v_dual_bitop2_b32 v17, 1, v17 bitop3:0x40
+; GFX1250-NEXT:    v_cmp_eq_u32_e64 s1, 1, v30
+; GFX1250-NEXT:    v_and_b32_e32 v28, 1, v28
+; GFX1250-NEXT:    s_wait_loadcnt 0x17
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT:    v_dual_cndmask_b32 v30, v34, v35, s1 :: v_dual_bitop2_b32 v33, 1, v33 bitop3:0x40
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v28
+; GFX1250-NEXT:    v_lshrrev_b32_e32 v28, 16, v31
+; GFX1250-NEXT:    v_cmp_eq_u32_e64 s0, 1, v29
+; GFX1250-NEXT:    scratch_load_b32 v29, off, s32 offset:16
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v35, 16, v35 :: v_dual_lshrrev_b32 v34, 16, v34
+; GFX1250-NEXT:    v_cndmask_b32_e32 v31, v32, v31, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v33
+; GFX1250-NEXT:    scratch_load_b32 v32, off, s32 offset:72
+; GFX1250-NEXT:    v_cndmask_b32_e64 v28, v83, v28, s0
+; GFX1250-NEXT:    scratch_load_b32 v83, off, s32 offset:4
+; GFX1250-NEXT:    v_cndmask_b32_e32 v34, v34, v35, vcc_lo
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    scratch_load_b32 v35, off, s32 offset:68
+; GFX1250-NEXT:    scratch_load_b32 v33, off, s32 offset:8
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v26
+; GFX1250-NEXT:    s_wait_loadcnt 0x1a
+; GFX1250-NEXT:    v_dual_cndmask_b32 v26, v36, v37, vcc_lo :: v_dual_bitop2_b32 v0, 1, v0 bitop3:0x40
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v24
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v37, 16, v37 :: v_dual_bitop2_b32 v2, 1, v2 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x18
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v36, 16, v36 :: v_dual_cndmask_b32 v24, v38, v39, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v22
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v38, 16, v38 :: v_dual_bitop2_b32 v7, 1, v7 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x16
+; GFX1250-NEXT:    v_dual_cndmask_b32 v22, v48, v49 :: v_dual_lshrrev_b32 v39, 16, v39
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v20
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v49, 16, v49 :: v_dual_bitop2_b32 v8, 1, v8 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x14
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v48, 16, v48 :: v_dual_cndmask_b32 v20, v50, v51, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v18
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v51, 16, v51 :: v_dual_bitop2_b32 v12, 1, v12 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x12
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v50, 16, v50 :: v_dual_cndmask_b32 v18, v52, v53, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v53, 16, v53 :: v_dual_bitop2_b32 v14, 1, v14 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x10
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v52, 16, v52 :: v_dual_cndmask_b32 v16, v54, v55, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v14
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v55, 16, v55 :: v_dual_lshrrev_b32 v54, 16, v54
+; GFX1250-NEXT:    s_wait_loadcnt 0xc
+; GFX1250-NEXT:    v_cndmask_b32_e32 v14, v66, v67, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v67, 16, v67 :: v_dual_lshrrev_b32 v66, 16, v66
+; GFX1250-NEXT:    s_wait_loadcnt 0x8
+; GFX1250-NEXT:    v_cndmask_b32_e32 v12, v70, v71, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v10
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v70, 16, v70 :: v_dual_bitop2_b32 v25, 1, v25 bitop3:0x40
+; GFX1250-NEXT:    s_wait_loadcnt 0x5
+; GFX1250-NEXT:    v_dual_cndmask_b32 v10, v81, v82 :: v_dual_lshrrev_b32 v71, 16, v71
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v8
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v82, 16, v82 :: v_dual_bitop2_b32 v27, 1, v27 bitop3:0x40
+; GFX1250-NEXT:    v_dual_cndmask_b32 v8, v69, v80 :: v_dual_lshrrev_b32 v81, 16, v81
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v6
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v80, 16, v80 :: v_dual_lshrrev_b32 v69, 16, v69
+; GFX1250-NEXT:    s_wait_loadcnt 0x4
+; GFX1250-NEXT:    v_dual_cndmask_b32 v6, v68, v29 :: v_dual_lshrrev_b32 v29, 16, v29
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v4
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v68, 16, v68 :: v_dual_cndmask_b32 v4, v64, v65, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v65, 16, v65 :: v_dual_lshrrev_b32 v64, 16, v64
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_cndmask_b32 v2, v32, v33 :: v_dual_lshrrev_b32 v33, 16, v33
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v32, 16, v32 :: v_dual_cndmask_b32 v0, v35, v83, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v27
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v83, 16, v83 :: v_dual_cndmask_b32 v27, v36, v37, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v25
+; GFX1250-NEXT:    v_cndmask_b32_e32 v25, v38, v39, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v23
+; GFX1250-NEXT:    v_dual_lshrrev_b32 v35, 16, v35 :: v_dual_cndmask_b32 v23, v48, v49, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v21
+; GFX1250-NEXT:    v_cndmask_b32_e32 v21, v50, v51, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v19
+; GFX1250-NEXT:    v_cndmask_b32_e32 v19, v52, v53, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v17
+; GFX1250-NEXT:    v_cndmask_b32_e32 v17, v54, v55, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v15
+; GFX1250-NEXT:    v_cndmask_b32_e32 v15, v66, v67, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v13
+; GFX1250-NEXT:    v_cndmask_b32_e32 v13, v70, v71, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v11
+; GFX1250-NEXT:    v_cndmask_b32_e32 v11, v81, v82, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX1250-NEXT:    v_cndmask_b32_e32 v7, v68, v29, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v3
+; GFX1250-NEXT:    v_cndmask_b32_e32 v3, v32, v33, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX1250-NEXT:    v_cndmask_b32_e32 v1, v35, v83, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v5
+; GFX1250-NEXT:    v_cndmask_b32_e32 v5, v64, v65, vcc_lo
+; GFX1250-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v9
+; GFX1250-NEXT:    v_cndmask_b32_e32 v9, v69, v80, vcc_lo
+; GFX1250-NEXT:    v_perm_b32 v0, v1, v0, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v1, v3, v2, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v2, v5, v4, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v3, v7, v6, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v4, v9, v8, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v5, v11, v10, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v6, v13, v12, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v7, v15, v14, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v8, v17, v16, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v9, v19, v18, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v10, v21, v20, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v11, v23, v22, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v12, v25, v24, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v13, v27, v26, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v14, v28, v31, 0x5040100
+; GFX1250-NEXT:    v_perm_b32 v15, v34, v30, 0x5040100
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = select <32 x i1> %cond, <32 x bfloat> %a, <32 x bfloat> %b
   ret <32 x bfloat> %op
 }
@@ -45987,6 +49163,13 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fma_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,1]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.fma.bf16(bfloat %a, bfloat %b, bfloat %c)
   ret bfloat %op
 }
@@ -46178,6 +49361,13 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fma_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v1, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
   ret <2 x bfloat> %op
 }
@@ -46446,6 +49636,14 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v3, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fma_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v2, v4
+; GFX1250-NEXT:    v_pk_fma_bf16 v1, v1, v3, v5
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c)
   ret <3 x bfloat> %op
 }
@@ -46780,6 +49978,14 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v4, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fma_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v2, v4
+; GFX1250-NEXT:    v_pk_fma_bf16 v1, v1, v3, v5
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
   ret <4 x bfloat> %op
 }
@@ -46915,6 +50121,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmuladd_bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,1]
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c)
   ret bfloat %op
 }
@@ -47114,6 +50327,13 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmuladd_v2bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v1, v2
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
   ret <2 x bfloat> %op
 }
@@ -47394,6 +50614,14 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v3, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmuladd_v3bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v2, v4
+; GFX1250-NEXT:    v_pk_fma_bf16 v1, v1, v3, v5
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c)
   ret <3 x bfloat> %op
 }
@@ -47744,6 +50972,16 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_perm_b32 v1, v4, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: v_fmuladd_v4bf16:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_pk_fma_bf16 v0, v0, v2, v4
+; GFX1250-NEXT:    v_pk_fma_bf16 v1, v1, v3, v5
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
   %op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
   ret <4 x bfloat> %op
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250FAKE16: {{.*}}

>From 830a113e01cf269ca9affa9858ec57f1c2652bba Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Tue, 23 Sep 2025 09:37:24 +0200
Subject: [PATCH 14/42] [libc++] Remove a few unused includes from <string> and
 <vector> (#160087)

---
 libcxx/include/__vector/vector.h | 1 -
 libcxx/include/string            | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h
index 27e681aeef22a..a69aa9145e638 100644
--- a/libcxx/include/__vector/vector.h
+++ b/libcxx/include/__vector/vector.h
@@ -23,7 +23,6 @@
 #include <__debug_utils/sanitizers.h>
 #include <__format/enable_insertable.h>
 #include <__fwd/vector.h>
-#include <__iterator/advance.h>
 #include <__iterator/bounded_iter.h>
 #include <__iterator/concepts.h>
 #include <__iterator/distance.h>
diff --git a/libcxx/include/string b/libcxx/include/string
index bbd7b98f112a6..081467edfe3fb 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -600,7 +600,6 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
 #  include <__functional/hash.h>
 #  include <__functional/unary_function.h>
 #  include <__fwd/string.h>
-#  include <__ios/fpos.h>
 #  include <__iterator/bounded_iter.h>
 #  include <__iterator/distance.h>
 #  include <__iterator/iterator_traits.h>
@@ -620,7 +619,6 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
 #  include <__ranges/concepts.h>
 #  include <__ranges/container_compatible_range.h>
 #  include <__ranges/from_range.h>
-#  include <__ranges/size.h>
 #  include <__string/char_traits.h>
 #  include <__string/extern_template_lists.h>
 #  include <__type_traits/conditional.h>
@@ -637,8 +635,6 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );
 #  include <__type_traits/is_trivially_copyable.h>
 #  include <__type_traits/is_trivially_relocatable.h>
 #  include <__type_traits/remove_cvref.h>
-#  include <__type_traits/void_t.h>
-#  include <__utility/auto_cast.h>
 #  include <__utility/default_three_way_comparator.h>
 #  include <__utility/forward.h>
 #  include <__utility/is_pointer_in_range.h>

>From 03bf361159657150f3d38455c9c022e6998fd588 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Tue, 23 Sep 2025 09:47:23 +0200
Subject: [PATCH 15/42] [clang][bytecode] Load value of non-lvalue
 ArraySubscriptExpr (#160024)

Array subscript expressions can be rvalues in C (for example, when
subscripting a vector-typed rvalue, as in the new test below); in that
case the element value must be loaded rather than a pointer being left
on the stack.

Fixes #158482
---
 clang/lib/AST/ByteCode/Compiler.cpp | 7 ++++++-
 clang/test/AST/ByteCode/c.c         | 4 ++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index fafec47f7de3c..7518cfd2cf94d 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -1787,7 +1787,12 @@ bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
     return false;
   if (DiscardResult)
     return this->emitPopPtr(E);
-  return true;
+
+  if (E->isGLValue())
+    return true;
+
+  OptPrimType T = classifyPrim(E);
+  return this->emitLoadPop(*T, E);
 }
 
 template <class Emitter>
diff --git a/clang/test/AST/ByteCode/c.c b/clang/test/AST/ByteCode/c.c
index 6681a4f427093..657a920e7d02c 100644
--- a/clang/test/AST/ByteCode/c.c
+++ b/clang/test/AST/ByteCode/c.c
@@ -368,3 +368,7 @@ void discardedCmp(void)
 {
     (*_b) = ((&a == &a) , a); // all-warning {{left operand of comma operator has no effect}}
 }
+
+/// ArraySubscriptExpr that's not an lvalue
+typedef unsigned char U __attribute__((vector_size(1)));
+void nonLValueASE(U f) { f[0] = f[((U)(U){0})[0]]; }

>From 5fce052e8a886714ddc6e6d37f43388cebb76c48 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver at google.com>
Date: Tue, 23 Sep 2025 09:57:35 +0200
Subject: [PATCH 16/42] Thread Safety Analysis: Fix recursive capability alias
 resolution (#159921)

Fix a false positive in thread safety alias analysis caused by incorrect
late resolution of aliases. The analysis previously failed to
distinguish between an alias and its defining expression; reassigning a
variable within that expression (e.g., `ptr` in `alias = ptr->field`)
would incorrectly change the dependent alias as well.

The fix is to properly use LocalVariableMap::lookupExpr's updated
context in a recursive lookup.
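
For illustration, the pattern from the regression test added below; `Foo`
carries a mutex `mu` guarding `data`, as in the existing test fixtures:

  void testAliasChainUnrelatedReassignment1(ContainerOfPtr *list) {
    Foo *eb = list->foo_ptr;   // eb aliases the Foo reached through list
    eb->mu.Lock();
    list = list->next;         // reassigning list must not re-resolve eb
    eb->data = 42;             // previously triggered a spurious warning here
    eb->mu.Unlock();
  }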

Reported-by: Christoph Hellwig <hch at lst.de>
Link: https://lkml.kernel.org/r/20250919140803.GA23745@lst.de
---
 .../Analysis/Analyses/ThreadSafetyCommon.h    |  6 ++-
 clang/lib/Analysis/ThreadSafety.cpp           | 45 ++++++++++---------
 clang/lib/Analysis/ThreadSafetyCommon.cpp     | 12 ++++-
 .../SemaCXX/warn-thread-safety-analysis.cpp   | 18 ++++++++
 4 files changed, 57 insertions(+), 24 deletions(-)

diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index d20f172f446e6..ffdfde8b7d453 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -543,10 +543,14 @@ class SExprBuilder {
   til::BasicBlock *CurrentBB = nullptr;
   BlockInfo *CurrentBlockInfo = nullptr;
 
+  // The closure that captures state required for the lookup; this may be
+  // mutable, so we have to save/restore before/after recursive lookups.
+  using LookupLocalVarExprClosure =
+      std::function<const Expr *(const NamedDecl *)>;
   // Recursion guard.
   llvm::DenseSet<const ValueDecl *> VarsBeingTranslated;
   // Context-dependent lookup of currently valid definitions of local variables.
-  std::function<const Expr *(const NamedDecl *)> LookupLocalVarExpr;
+  LookupLocalVarExprClosure LookupLocalVarExpr;
 };
 
 #ifndef NDEBUG
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index cee98d58a6112..d19f86a2223d8 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -1668,13 +1668,13 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
   const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
   const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
 
-  // Temporarily set the lookup context for SExprBuilder.
-  SxBuilder.setLookupLocalVarExpr([&](const NamedDecl *D) -> const Expr * {
-    if (!Handler.issueBetaWarnings())
-      return nullptr;
-    auto Ctx = LVarCtx;
-    return LocalVarMap.lookupExpr(D, Ctx);
-  });
+  if (Handler.issueBetaWarnings()) {
+    // Temporarily set the lookup context for SExprBuilder.
+    SxBuilder.setLookupLocalVarExpr(
+        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
+          return LocalVarMap.lookupExpr(D, Ctx);
+        });
+  }
   auto Cleanup = llvm::make_scope_exit(
       [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });
 
@@ -1722,6 +1722,19 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
   LocalVariableMap::Context LVarCtx;
   unsigned CtxIndex;
 
+  // To update and adjust the context.
+  void updateLocalVarMapCtx(const Stmt *S) {
+    if (S)
+      LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+    if (!Analyzer->Handler.issueBetaWarnings())
+      return;
+    // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
+    Analyzer->SxBuilder.setLookupLocalVarExpr(
+        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
+          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
+        });
+  }
+
   // helper functions
 
   void checkAccess(const Expr *Exp, AccessKind AK,
@@ -1747,13 +1760,7 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
       : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
         FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
         CtxIndex(Info.EntryIndex) {
-    Analyzer->SxBuilder.setLookupLocalVarExpr(
-        [this](const NamedDecl *D) -> const Expr * {
-          if (!Analyzer->Handler.issueBetaWarnings())
-            return nullptr;
-          auto Ctx = LVarCtx;
-          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
-        });
+    updateLocalVarMapCtx(nullptr);
   }
 
   ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
@@ -2259,9 +2266,7 @@ void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
   if (!BO->isAssignmentOp())
     return;
 
-  // adjust the context
-  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
-
+  updateLocalVarMapCtx(BO);
   checkAccess(BO->getLHS(), AK_Written);
 }
 
@@ -2307,8 +2312,7 @@ void BuildLockset::examineArguments(const FunctionDecl *FD,
 }
 
 void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
-  // adjust the context
-  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, Exp, LVarCtx);
+  updateLocalVarMapCtx(Exp);
 
   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
     const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
@@ -2404,8 +2408,7 @@ static const Expr *UnpackConstruction(const Expr *E) {
 }
 
 void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
-  // adjust the context
-  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+  updateLocalVarMapCtx(S);
 
   for (auto *D : S->getDeclGroup()) {
     if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 25ad673b58db6..ef48ae439c5f3 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -248,9 +248,17 @@ til::SExpr *SExprBuilder::translateVariable(const VarDecl *VD,
   // defining VD, use its pre-assignment value to break the cycle.
   if (VarsBeingTranslated.contains(VD->getCanonicalDecl()))
     return new (Arena) til::LiteralPtr(VD);
-  VarsBeingTranslated.insert(VD->getCanonicalDecl());
+
+  // The closure captures state that is updated to correctly translate chains of
+  // aliases. Restore it when we are done with recursive translation.
   auto Cleanup = llvm::make_scope_exit(
-      [&] { VarsBeingTranslated.erase(VD->getCanonicalDecl()); });
+      [&, RestoreClosure =
+              VarsBeingTranslated.empty() ? LookupLocalVarExpr : nullptr] {
+        VarsBeingTranslated.erase(VD->getCanonicalDecl());
+        if (VarsBeingTranslated.empty())
+          LookupLocalVarExpr = RestoreClosure;
+      });
+  VarsBeingTranslated.insert(VD->getCanonicalDecl());
 
   QualType Ty = VD->getType();
   if (!VD->isStaticLocal() && Ty->isPointerType()) {
diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
index ef662b78fb6f1..0e91639a271c5 100644
--- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -7463,6 +7463,7 @@ void testNestedAcquire(Container *c) EXCLUSIVE_LOCK_FUNCTION(&c->foo.mu) {
 
 struct ContainerOfPtr {
   Foo *foo_ptr;
+  ContainerOfPtr *next;
 };
 
 void testIndirectAccess(ContainerOfPtr *fc) {
@@ -7472,6 +7473,23 @@ void testIndirectAccess(ContainerOfPtr *fc) {
   ptr->mu.Unlock();
 }
 
+void testAliasChainUnrelatedReassignment1(ContainerOfPtr *list) {
+  Foo *eb = list->foo_ptr;
+  eb->mu.Lock();
+  list = list->next;
+  eb->data = 42;
+  eb->mu.Unlock();
+}
+
+void testAliasChainUnrelatedReassignment2(ContainerOfPtr *list) {
+  ContainerOfPtr *busyp = list;
+  Foo *eb = busyp->foo_ptr;
+  eb->mu.Lock();
+  busyp = busyp->next;
+  eb->data = 42;
+  eb->mu.Unlock();
+}
+
 void testControlFlowDoWhile(Foo *f, int x) {
   Foo *ptr = f;
 

>From c0c5129360e7ac1cfcd52dc114b1493f262f18fd Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Tue, 23 Sep 2025 09:59:11 +0200
Subject: [PATCH 17/42] [libc++][NFC] Remove guard around
 noop_coroutine_handle.h (#160044)

This check is always true, since all supported versions of Clang have
`__builtin_coro_noop` and the only other supported compiler is GCC.
---
 libcxx/include/__coroutine/noop_coroutine_handle.h | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/libcxx/include/__coroutine/noop_coroutine_handle.h b/libcxx/include/__coroutine/noop_coroutine_handle.h
index 2b2838b6bf49b..692398a8a8431 100644
--- a/libcxx/include/__coroutine/noop_coroutine_handle.h
+++ b/libcxx/include/__coroutine/noop_coroutine_handle.h
@@ -20,8 +20,6 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
-#  if __has_builtin(__builtin_coro_noop) || defined(_LIBCPP_COMPILER_GCC)
-
 // [coroutine.noop]
 // [coroutine.promise.noop]
 struct noop_coroutine_promise {};
@@ -56,12 +54,12 @@ struct coroutine_handle<noop_coroutine_promise> {
 private:
   _LIBCPP_HIDE_FROM_ABI friend coroutine_handle<noop_coroutine_promise> noop_coroutine() noexcept;
 
-#    if __has_builtin(__builtin_coro_noop)
+#  if __has_builtin(__builtin_coro_noop)
   _LIBCPP_HIDE_FROM_ABI coroutine_handle() noexcept { this->__handle_ = __builtin_coro_noop(); }
 
   void* __handle_ = nullptr;
 
-#    elif defined(_LIBCPP_COMPILER_GCC)
+#  elif defined(_LIBCPP_COMPILER_GCC)
   // GCC doesn't implement __builtin_coro_noop().
   // Construct the coroutine frame manually instead.
   struct __noop_coroutine_frame_ty_ {
@@ -78,20 +76,18 @@ struct coroutine_handle<noop_coroutine_promise> {
 
   _LIBCPP_HIDE_FROM_ABI coroutine_handle() noexcept = default;
 
-#    endif // __has_builtin(__builtin_coro_noop)
+#  endif // __has_builtin(__builtin_coro_noop)
 };
 
 using noop_coroutine_handle = coroutine_handle<noop_coroutine_promise>;
 
-#    if defined(_LIBCPP_COMPILER_GCC)
+#  if defined(_LIBCPP_COMPILER_GCC)
 inline noop_coroutine_handle::__noop_coroutine_frame_ty_ noop_coroutine_handle::__noop_coroutine_frame_{};
-#    endif
+#  endif
 
 // [coroutine.noop.coroutine]
 inline _LIBCPP_HIDE_FROM_ABI noop_coroutine_handle noop_coroutine() noexcept { return noop_coroutine_handle(); }
 
-#  endif // __has_builtin(__builtin_coro_noop) || defined(_LIBCPP_COMPILER_GCC)
-
 _LIBCPP_END_NAMESPACE_STD
 
 #endif // _LIBCPP_STD_VER >= 20

>From 2d6b56453e27aaa9ea7fc242b8a6887daf2b01d6 Mon Sep 17 00:00:00 2001
From: Tomohiro Kashiwada <kikairoya at gmail.com>
Date: Tue, 23 Sep 2025 17:10:20 +0900
Subject: [PATCH 18/42] [clang][DebugInfo] Re-enable VTable debug info on COFF
 platforms (#158450)

The debug info for VTables introduced in #130255 was temporarily
disabled on COFF platforms by #151684, due to the risk of emitting
dangling relocations (see also:
https://github.com/llvm/llvm-project/issues/149639#issuecomment-3114257062
).

This patch re-enables that debug info and adds a guard to prevent
emitting dangling relocations by checking whether the VTable definition
is actually emitted.

Resolves #149639
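
For illustration, a minimal sketch (assuming the Itanium key-function rule;
the class names are made up) of the two cases the new guard distinguishes on
COFF targets:

  struct Emitted {
    virtual void f() {}   // all virtuals inline, no key function: the vtable is
  };                      // emitted linkonce_odr here, so '_vtable$' is generated

  struct NotEmitted {
    virtual void f();     // key function defined in another TU: the vtable is only
  };                      // a declaration for the linker, so '_vtable$' is skipped
                          // to avoid a dangling relocation in the debug section

  void use() {
    Emitted e;            // referencing the vtable forces its (comdat) emission
    NotEmitted n;         // only references the external vtable declaration
  }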
---
 clang/lib/CodeGen/CGDebugInfo.cpp             | 14 +++++-
 clang/test/DebugInfo/CXX/class.cpp            | 46 +++++++------------
 clang/test/DebugInfo/CXX/vtable-external.cpp  | 17 +++++--
 .../CXX/vtable-inheritance-diamond.cpp        |  2 +
 .../CXX/vtable-inheritance-multiple.cpp       |  2 +
 .../CXX/vtable-inheritance-simple-main.cpp    | 24 ++++++++++
 .../CXX/vtable-inheritance-simple.cpp         |  9 +---
 .../CXX/vtable-inheritance-virtual.cpp        |  2 +
 .../CXX/vtable-template-instantiation.cpp     | 35 +++++++++-----
 clang/test/Modules/ExtDebugInfo.cpp           | 15 ++----
 10 files changed, 102 insertions(+), 64 deletions(-)

diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 578d09f7971d6..12c7d48e20d67 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -2657,12 +2657,22 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
 // existing information in the DWARF. The type is assumed to be 'void *'.
 void CGDebugInfo::emitVTableSymbol(llvm::GlobalVariable *VTable,
                                    const CXXRecordDecl *RD) {
-  if (!CGM.getTarget().getCXXABI().isItaniumFamily() ||
-      CGM.getTarget().getTriple().isOSBinFormatCOFF())
+  if (!CGM.getTarget().getCXXABI().isItaniumFamily())
     return;
   if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
     return;
 
+  // On COFF platforms, we shouldn't emit a reference to an external entity
+  // (i.e. the VTable) into debug info, which lives in a discardable section.
+  // If that entity ends up implicitly dllimported from another DLL, the linker
+  // may produce a runtime pseudo-relocation for it (BFD ld only; LLD refuses
+  // to emit such relocations). If the debug section is stripped, the runtime
+  // pseudo-relocation then points at memory outside the module, causing an
+  // access violation.
+  if (CGM.getTarget().getTriple().isOSBinFormatCOFF() &&
+      VTable->isDeclarationForLinker())
+    return;
+
   ASTContext &Context = CGM.getContext();
   StringRef SymbolName = "_vtable$";
   SourceLocation Loc;
diff --git a/clang/test/DebugInfo/CXX/class.cpp b/clang/test/DebugInfo/CXX/class.cpp
index aa24a63c58cb8..e67fba8021a14 100644
--- a/clang/test/DebugInfo/CXX/class.cpp
+++ b/clang/test/DebugInfo/CXX/class.cpp
@@ -99,12 +99,12 @@ int main(int argc, char **argv) {
   return 0;
 }
 
-// RUN: %clang_cc1 -triple x86_64-unknown_unknown -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK -check-prefix=CHECKELF %s
-// RUN: %clang_cc1 -triple i686-cygwin -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK -check-prefix=CHECKCOFF %s
-// RUN: %clang_cc1 -triple armv7l-unknown-linux-gnueabihf -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK -check-prefix=CHECKELF %s
-// RUN: %clang_cc1 -triple x86_64-unknown_unknown -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK -check-prefix=CHECKELF %s
-// RUN: %clang_cc1 -triple i686-cygwin -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK -check-prefix=CHECKCOFF %s
-// RUN: %clang_cc1 -triple armv7l-unknown-linux-gnueabihf -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK -check-prefix=CHECKELF %s
+// RUN: %clang_cc1 -triple x86_64-unknown_unknown -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK  %s
+// RUN: %clang_cc1 -triple i686-cygwin -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple armv7l-unknown-linux-gnueabihf -emit-llvm -debug-info-kind=limited -fexceptions -std=c++98 %s -o - | FileCheck -check-prefix=CHECK98 -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple x86_64-unknown_unknown -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple i686-cygwin -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK %s
+// RUN: %clang_cc1 -triple armv7l-unknown-linux-gnueabihf -emit-llvm -debug-info-kind=limited -fexceptions -std=c++11 %s -o - | FileCheck -check-prefix=CHECK11 -check-prefix=CHECK %s
 
 // CHECK98: invoke {{.+}} @_ZN1BD1Ev(ptr {{[^,]*}} %b)
 // CHECK98-NEXT: unwind label %{{.+}}, !dbg ![[EXCEPTLOC:.*]]
@@ -122,14 +122,6 @@ int main(int argc, char **argv) {
 // CHECK-SAME:             ){{$}}
 
 // CHECK:      ![[INT:[0-9]+]] = !DIBasicType(name: "int"
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_structure_type, name: "foo"
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_class_type, name: "bar"
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_union_type, name: "baz"
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_class_type, name: "B"
-// CHECKCOFF-NOT:              DIFlagFwdDecl
-// CHECKCOFF-SAME:             ){{$}}
-// CHECKCOFF: !DIDerivedType(tag: DW_TAG_member, name: "_vptr$B",
-// CHECKCOFF-SAME:           DIFlagArtificial
 
 // CHECK: [[C:![0-9]*]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "C",
 // CHECK-NOT:                              DIFlagFwdDecl
@@ -145,19 +137,19 @@ int main(int argc, char **argv) {
 // CHECK-SAME:                     DIFlagStaticMember
 // CHECK: [[C_DTOR]] = !DISubprogram(name: "~C"
 
-// CHECKELF: !DICompositeType(tag: DW_TAG_structure_type, name: "K"
-// CHECKELF-SAME:             identifier: "_ZTS1K"
-// CHECKELF-SAME:             ){{$}}
+// CHECK: !DICompositeType(tag: DW_TAG_structure_type, name: "K"
+// CHECK-SAME:             identifier: "_ZTS1K"
+// CHECK-SAME:             ){{$}}
 
-// CHECKELF: !DICompositeType(tag: DW_TAG_class_type, name: "B"
-// CHECKELF-NOT:              DIFlagFwdDecl
-// CHECKELF-SAME:             ){{$}}
-// CHECKELF: !DIDerivedType(tag: DW_TAG_member, name: "_vptr$B",
-// CHECKELF-SAME:           DIFlagArtificial
+// CHECK: !DICompositeType(tag: DW_TAG_class_type, name: "B"
+// CHECK-NOT:              DIFlagFwdDecl
+// CHECK-SAME:             ){{$}}
+// CHECK: !DIDerivedType(tag: DW_TAG_member, name: "_vptr$B",
+// CHECK-SAME:           DIFlagArtificial
 
-// CHECKELF: !DICompositeType(tag: DW_TAG_structure_type, name: "foo"
-// CHECKELF: !DICompositeType(tag: DW_TAG_class_type, name: "bar"
-// CHECKELF: !DICompositeType(tag: DW_TAG_union_type, name: "baz"
+// CHECK: !DICompositeType(tag: DW_TAG_structure_type, name: "foo"
+// CHECK: !DICompositeType(tag: DW_TAG_class_type, name: "bar"
+// CHECK: !DICompositeType(tag: DW_TAG_union_type, name: "baz"
 
 // CHECK: [[D:![0-9]+]] = !DICompositeType(tag: DW_TAG_structure_type, name: "D"
 // CHECK-SAME:             size:
@@ -170,10 +162,6 @@ int main(int argc, char **argv) {
 // CHECK-NOT:              identifier:
 // CHECK-SAME:             ){{$}}
 
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_structure_type, name: "K"
-// CHECKCOFF-SAME:             identifier: "_ZTS1K"
-// CHECKCOFF-SAME:             ){{$}}
-
 // CHECK: [[L:![0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "L"
 // CHECK-SAME:             ){{$}}
 // CHECK: [[L_FUNC_DECL:![0-9]*]] = !DISubprogram(name: "func",{{.*}} scope: [[L]]
diff --git a/clang/test/DebugInfo/CXX/vtable-external.cpp b/clang/test/DebugInfo/CXX/vtable-external.cpp
index b5b34c4123e3b..ff8144e740f6f 100644
--- a/clang/test/DebugInfo/CXX/vtable-external.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-external.cpp
@@ -18,7 +18,7 @@
 //   * Its '_vtable$' is NOT generated
 //  # when optimized even if no LLVM passes:
 //   * The vtable is declared as `available_externally` (which is potentially turned into `external` by LLVM passes)
-//   * Its '_vtable$' is generated
+//   * Its '_vtable$' is generated only if the compiler targets a non-COFF platform
 
 struct CInlined {
   virtual void f1() noexcept {}
@@ -64,14 +64,20 @@ int main() {
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes -DNO_DTOR_BODY %s -o - | FileCheck %s -check-prefixes CHECK-NO-DTOR,CHECK-NO-DTOR-O0
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes -DNO_DTOR_BODY %s -o - | FileCheck %s -check-prefixes CHECK-NO-DTOR,CHECK-NO-DTOR-O1
 
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes                %s -o - | FileCheck %s -check-prefixes CHECK-HAS-DTOR,CHECK-HAS-DTOR-O0
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes                %s -o - | FileCheck %s -check-prefixes CHECK-HAS-DTOR,CHECK-HAS-DTOR-O1-NODBG
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes -DNO_DTOR_BODY %s -o - | FileCheck %s -check-prefixes CHECK-NO-DTOR,CHECK-NO-DTOR-O0
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes -DNO_DTOR_BODY %s -o - | FileCheck %s -check-prefixes CHECK-NO-DTOR,CHECK-NO-DTOR-O1-NODBG
+
 // CHECK-HAS-DTOR: $_ZTV8CInlined = comdat any
 // CHECK-HAS-DTOR-NOT: $_ZTV9CNoInline
 // CHECK-HAS-DTOR-NOT: $_ZTV8CNoFnDef
 
 // CHECK-HAS-DTOR-DAG: @_ZTV8CInlined = linkonce_odr {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, comdat, align 8, !dbg [[INLINED_VTABLE_VAR:![0-9]+]]
 // CHECK-HAS-DTOR-DAG: @_ZTV9CNoInline = {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOINLINE_VTABLE_VAR:![0-9]+]]
-// CHECK-HAS-DTOR-O0-DAG: @_ZTV8CNoFnDef = external {{.*}}constant {{{ \[[^]]*\] }}}, align 8{{$}}
-// CHECK-HAS-DTOR-O1-DAG: @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOFNDEF_VTABLE_VAR:![0-9]+]]
+// CHECK-HAS-DTOR-O0-DAG:       @_ZTV8CNoFnDef = external {{.*}}constant {{{ \[[^]]*\] }}}, align 8{{$}}
+// CHECK-HAS-DTOR-O1-DAG:       @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOFNDEF_VTABLE_VAR:![0-9]+]]
+// CHECK-HAS-DTOR-O1-NODBG-DAG: @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8{{$}}
 
 // CHECK-HAS-DTOR: !llvm.dbg.cu
 
@@ -94,8 +100,9 @@ int main() {
 
 // CHECK-NO-DTOR-DAG: @_ZTV8CInlined = external {{.*}}constant {{.*}}, align 8{{$}}
 // CHECK-NO-DTOR-DAG: @_ZTV9CNoInline = {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOINLINE_VTABLE_VAR:![0-9]+]]
-// CHECK-NO-DTOR-O0-DAG: @_ZTV8CNoFnDef = external {{.*}}constant {{{ \[[^]]*\] }}}, align 8{{$}}
-// CHECK-NO-DTOR-O1-DAG: @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOFNDEF_VTABLE_VAR:![0-9]+]]
+// CHECK-NO-DTOR-O0-DAG:       @_ZTV8CNoFnDef = external {{.*}}constant {{{ \[[^]]*\] }}}, align 8{{$}}
+// CHECK-NO-DTOR-O1-DAG:       @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[NOFNDEF_VTABLE_VAR:![0-9]+]]
+// CHECK-NO-DTOR-O1-NODBG-DAG: @_ZTV8CNoFnDef = available_externally {{.*}}constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8{{$}}
 
 // CHECK-NO-DTOR: !llvm.dbg.cu
 
diff --git a/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp
index 5bf7dc15c46d0..e9dc4c1c122ea 100644
--- a/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-diamond.cpp
@@ -44,6 +44,8 @@ int main() {
 
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
 
 // CHECK: $_ZTVN3NSP5CBaseE = comdat any
 // CHECK: $_ZTVN5NSP_15CLeftE = comdat any
diff --git a/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp
index 3b7e3a74f8eac..62bc18d58bb5f 100644
--- a/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-multiple.cpp
@@ -38,6 +38,8 @@ int main() {
 
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
 
 // CHECK: $_ZTVN5NSP_18CBaseOneE = comdat any
 // CHECK: $_ZTVN5NSP_28CBaseTwoE = comdat any
diff --git a/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp
index bcf8ff73cee69..19752eb37f5a8 100644
--- a/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-simple-main.cpp
@@ -108,6 +108,30 @@ int main() {
 // RUN: llvm-link %t.simple-base.bc %t.simple-derived.bc %t.simple-main.bc -S -o %t.simple-combined.ll
 // RUN: FileCheck --input-file=%t.simple-combined.ll -check-prefix=CHECK-TWO %s
 
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-base.bc    -DBASE_CODE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-derived.bc -DDERIVED_CODE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-main.bc    -DMAIN_CODE
+// RUN: llvm-link %t.simple-base.bc %t.simple-derived.bc %t.simple-main.bc -S -o %t.simple-combined.ll
+// RUN: FileCheck --input-file=%t.simple-combined.ll -check-prefix=CHECK-ONE %s
+
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-base.bc    -DBASE_CODE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-derived.bc -DDERIVED_CODE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-main.bc    -DMAIN_CODE
+// RUN: llvm-link %t.simple-base.bc %t.simple-derived.bc %t.simple-main.bc -S -o %t.simple-combined.ll
+// RUN: FileCheck --input-file=%t.simple-combined.ll -check-prefix=CHECK-ONE %s
+
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-base.bc    -DBASE_CODE    -DSYMBOL_AT_FILE_SCOPE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-derived.bc -DDERIVED_CODE -DSYMBOL_AT_FILE_SCOPE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 %s -o %t.simple-main.bc    -DMAIN_CODE    -DSYMBOL_AT_FILE_SCOPE
+// RUN: llvm-link %t.simple-base.bc %t.simple-derived.bc %t.simple-main.bc -S -o %t.simple-combined.ll
+// RUN: FileCheck --input-file=%t.simple-combined.ll -check-prefix=CHECK-TWO %s
+
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-base.bc    -DBASE_CODE    -DSYMBOL_AT_FILE_SCOPE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-derived.bc -DDERIVED_CODE -DSYMBOL_AT_FILE_SCOPE
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm-bc -debug-info-kind=limited -dwarf-version=5 -O0 -flto %s -o %t.simple-main.bc    -DMAIN_CODE    -DSYMBOL_AT_FILE_SCOPE
+// RUN: llvm-link %t.simple-base.bc %t.simple-derived.bc %t.simple-main.bc -S -o %t.simple-combined.ll
+// RUN: FileCheck --input-file=%t.simple-combined.ll -check-prefix=CHECK-TWO %s
+
 // CHECK-ONE: ${{_ZN3NSP5CBaseC2Ev|_ZN8CDerivedC2Ev}} = comdat any
 // CHECK-ONE: ${{_ZN3NSP5CBaseC2Ev|_ZN8CDerivedC2Ev}} = comdat any
 
diff --git a/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp
index 8d8c778dbb04e..d2c6d41527202 100644
--- a/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-simple.cpp
@@ -30,19 +30,14 @@ int main() {
 
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
-// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s --check-prefix=COFF
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
 
 // CHECK: $_ZTVN3NSP5CBaseE = comdat any
 // CHECK: $_ZTV8CDerived = comdat any
 
 // CHECK: @_ZTVN3NSP5CBaseE = linkonce_odr {{.*}}unnamed_addr constant {{.*}}, comdat, align 8, !dbg [[BASE_VTABLE_VAR:![0-9]*]]
 // CHECK: @_ZTV8CDerived = linkonce_odr {{.*}}unnamed_addr constant {{.*}}, comdat, align 8, !dbg [[DERIVED_VTABLE_VAR:![0-9]*]]
-// COFF: @_ZTVN3NSP5CBaseE = linkonce_odr {{.*}}unnamed_addr constant {{.*}}, comdat, align 8
-// COFF-NOT: !dbg
-// COFF-SAME: {{$}}
-// COFF: @_ZTV8CDerived = linkonce_odr {{.*}}unnamed_addr constant {{.*}}, comdat, align 8
-// COFF-NOT: !dbg
-// COFF-SAME: {{$}}
 
 // CHECK: [[BASE_VTABLE_VAR]] = !DIGlobalVariableExpression(var: [[BASE_VTABLE:![0-9]*]], expr: !DIExpression())
 // CHECK-NEXT: [[BASE_VTABLE]] = distinct !DIGlobalVariable(name: "_vtable$", linkageName: "_ZTVN3NSP5CBaseE"
diff --git a/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp b/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp
index c3015f0498419..9aac8ddc03e88 100644
--- a/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-inheritance-virtual.cpp
@@ -44,6 +44,8 @@ int main() {
 
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - | FileCheck %s
 
 // CHECK: $_ZTVN3NSP5CBaseE = comdat any
 // CHECK: $_ZTVN5NSP_15CLeftE = comdat any
diff --git a/clang/test/DebugInfo/CXX/vtable-template-instantiation.cpp b/clang/test/DebugInfo/CXX/vtable-template-instantiation.cpp
index 60726d253a686..0eb1cfec01c36 100644
--- a/clang/test/DebugInfo/CXX/vtable-template-instantiation.cpp
+++ b/clang/test/DebugInfo/CXX/vtable-template-instantiation.cpp
@@ -12,7 +12,7 @@
 //   * Its '_vtable$' is NOT generated
 //  # when optimized even if no LLVM passes
 //   * The vtable is declared as `available_externally` (which is potentially turned into `external` by LLVM passes)
-//   * Its '_vtable$' is generated
+//   * Its '_vtable$' is generated only if the compiler targets a non-COFF platform
 
 struct CBase {
   virtual void f() noexcept {}
@@ -54,8 +54,17 @@ int main() {
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DNOCAST    | FileCheck %s -check-prefixes IMPLICIT
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DEXPLICIT  | FileCheck %s -check-prefixes EXPLICIT
 // RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DEXPLICIT  | FileCheck %s -check-prefixes EXPLICIT
-// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O0
-// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O1
+// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O0,EXTERN-NODBG
+// RUN: %clang_cc1 -triple x86_64-linux -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O1,EXTERN-DBG
+
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o -             | FileCheck %s -check-prefixes IMPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o -             | FileCheck %s -check-prefixes IMPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DNOCAST    | FileCheck %s -check-prefixes IMPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DNOCAST    | FileCheck %s -check-prefixes IMPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DEXPLICIT  | FileCheck %s -check-prefixes EXPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DEXPLICIT  | FileCheck %s -check-prefixes EXPLICIT
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O0 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O0,EXTERN-NODBG
+// RUN: %clang_cc1 -triple x86_64-mingw -emit-llvm -debug-info-kind=limited -dwarf-version=5 -O1 -disable-llvm-passes %s -o - -DEXTERN    | FileCheck %s -check-prefixes EXTERN,EXTERN-O1-NODBG,EXTERN-NODBG
 
 // IMPLICIT: $_ZTV9CTemplateIvE = comdat any
 // IMPLICIT: @_ZTV9CTemplateIvE = linkonce_odr {{.*}}unnamed_addr constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, comdat, align 8, !dbg [[VTABLE_VAR:![0-9]*]]
@@ -74,11 +83,15 @@ int main() {
 // EXPLICIT-DAG: [[PVOID]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
 
 // EXTERN-NOT: $_ZTV9CTemplateIvE
-// EXTERN-O0: @_ZTV9CTemplateIvE = external {{.*}}unnamed_addr constant {{{ \[[^]]*\] }}}, align 8{{$}}
-// EXTERN-O1: @_ZTV9CTemplateIvE = available_externally {{.*}}unnamed_addr constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[VTABLE_VAR:![0-9]*]]
-// EXTERN-O0-NOT: linkageName: "_ZTV9CTemplateIvE"
-// EXTERN-O1-DAG: [[VTABLE:![0-9]+]] = distinct !DIGlobalVariable(name: "_vtable$", linkageName: "_ZTV9CTemplateIvE"
-// EXTERN-O1-DAG: [[VTABLE_VAR]] = !DIGlobalVariableExpression(var: [[VTABLE]], expr: !DIExpression())
-// EXTERN-O1-DAG: [[TYPE:![0-9]+]] = !DICompositeType(tag: DW_TAG_structure_type, name: "CTemplate<void>"
-// EXTERN-O1-DAG: !DIDerivedType(tag: DW_TAG_variable, name: "_vtable$", scope: [[TYPE]], file: {{.*}}, baseType: [[PVOID:![0-9]+]], flags: DIFlagPrivate | DIFlagArtificial | DIFlagStaticMember)
-// EXTERN-O1-DAG: [[PVOID]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+// EXTERN-O0:       @_ZTV9CTemplateIvE = external {{.*}}unnamed_addr constant {{{ \[[^]]*\] }}}, align 8{{$}}
+// EXTERN-O1:       @_ZTV9CTemplateIvE = available_externally {{.*}}unnamed_addr constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8, !dbg [[VTABLE_VAR:![0-9]*]]
+// EXTERN-O1-NODBG: @_ZTV9CTemplateIvE = available_externally {{.*}}unnamed_addr constant {{{ \[[^]]*\] } { \[[^]]*\] \[[^]]*\] }}}, align 8{{$}}
+// EXTERN-NODBG-DAG: [[TYPE:![0-9]+]] = !DICompositeType(tag: DW_TAG_structure_type, name: "CTemplate<void>"
+// EXTERN-NODBG-DAG: !DICompileUnit
+// EXTERN-NODBG-NOT: !DIGlobalVariable(name: "_vtable$", linkageName: "_ZTV9CTemplateIvE"
+// EXTERN-NODBG-NOT: !DIDerivedType(tag: DW_TAG_variable, name: "_vtable$", scope: [[TYPE]], file: {{.*}}, baseType: [[PVOID:![0-9]+]], flags: DIFlagPrivate | DIFlagArtificial | DIFlagStaticMember)
+// EXTERN-DBG-DAG: [[VTABLE:![0-9]+]] = distinct !DIGlobalVariable(name: "_vtable$", linkageName: "_ZTV9CTemplateIvE"
+// EXTERN-DBG-DAG: [[VTABLE_VAR]] = !DIGlobalVariableExpression(var: [[VTABLE]], expr: !DIExpression())
+// EXTERN-DBG-DAG: [[TYPE:![0-9]+]] = !DICompositeType(tag: DW_TAG_structure_type, name: "CTemplate<void>"
+// EXTERN-DBG-DAG: !DIDerivedType(tag: DW_TAG_variable, name: "_vtable$", scope: [[TYPE]], file: {{.*}}, baseType: [[PVOID:![0-9]+]], flags: DIFlagPrivate | DIFlagArtificial | DIFlagStaticMember)
+// EXTERN-DBG-DAG: [[PVOID]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
diff --git a/clang/test/Modules/ExtDebugInfo.cpp b/clang/test/Modules/ExtDebugInfo.cpp
index 3e74e2291d5e4..184973bc1783c 100644
--- a/clang/test/Modules/ExtDebugInfo.cpp
+++ b/clang/test/Modules/ExtDebugInfo.cpp
@@ -8,7 +8,7 @@
 // RUN:     -fmodule-format=obj -fimplicit-module-maps -DMODULES \
 // RUN:     -triple %itanium_abi_triple \
 // RUN:     -fmodules-cache-path=%t %s -I %S/Inputs -I %t -emit-llvm -o %t-mod.ll
-// RUN: cat %t-mod.ll |  FileCheck %s --check-prefix=CHECK %if target={{.*-(win|mingw|cyg).*}} %{--check-prefix=CHECKCOFF%} %else %{--check-prefix=CHECKELF%}
+// RUN: cat %t-mod.ll |  FileCheck %s
 
 // PCH:
 // RUN: %clang_cc1 -x c++ -std=c++11 -fmodule-format=obj -emit-pch -I%S/Inputs \
@@ -18,7 +18,7 @@
 // RUN:     -dwarf-ext-refs -fmodule-format=obj \
 // RUN:     -triple %itanium_abi_triple \
 // RUN:     -include-pch %t.pch %s -emit-llvm -o %t-pch.ll
-// RUN: cat %t-pch.ll |  FileCheck %s --check-prefix=CHECK %if target={{.*-(win|mingw|cyg).*}} %{--check-prefix=CHECKCOFF%} %else %{--check-prefix=CHECKELF%}
+// RUN: cat %t-pch.ll |  FileCheck %s
 // RUN: cat %t-pch.ll |  FileCheck %s --check-prefix=CHECK-PCH
 
 #ifdef MODULES
@@ -208,9 +208,9 @@ void foo() {
 // CHECK-SAME:              name: "InAnonymousNamespace", {{.*}}DIFlagFwdDecl)
 
 // There is a full definition of the type available in the module.
-// CHECKELF: !DICompositeType(tag: DW_TAG_structure_type, name: "Virtual",
-// CHECKELF-SAME:             DIFlagFwdDecl
-// CHECKELF-SAME:             identifier: "_ZTS7Virtual")
+// CHECK: !DICompositeType(tag: DW_TAG_structure_type, name: "Virtual",
+// CHECK-SAME:             DIFlagFwdDecl
+// CHECK-SAME:             identifier: "_ZTS7Virtual")
 
 // CHECK: !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !{{[0-9]+}}, entity: ![[STRUCT]], file: ![[CPP]], line: 50)
 
@@ -222,8 +222,3 @@ void foo() {
 
 // CHECK: !DICompositeType(tag: DW_TAG_class_type, name: "A",
 // CHECK-SAME:             DIFlagFwdDecl
-
-// There is a full definition of the type available in the module.
-// CHECKCOFF: !DICompositeType(tag: DW_TAG_structure_type, name: "Virtual",
-// CHECKCOFF-SAME:             DIFlagFwdDecl
-// CHECKCOFF-SAME:             identifier: "_ZTS7Virtual")

>From 7483b7a522b1de393142c5ece27326372ca146a5 Mon Sep 17 00:00:00 2001
From: Srinivasa Ravi <srinivasar at nvidia.com>
Date: Tue, 23 Sep 2025 13:44:01 +0530
Subject: [PATCH 19/42] [MLIR][NVVM] Fix undef in cp.async.bulk.tensor.reduce
 Op (#157423)

This change:
- Moves the LLVMIR lowering code of the NVVM dialect
  `cp.async.bulk.tensor.reduce` Op to `NVVMDialect.cpp`.
- Fixes the use of `undef` in the lowering, since `undef` is now
  deprecated.
- Replaces the macros with a lookup table for selecting the intrinsic.

The tests are updated accordingly.
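
For readers skimming the diff, here is a standalone sketch of the lookup-table
pattern that replaces the macro-generated switch. It is illustrative only: the
enum names and integer IDs below are hypothetical stand-ins for TMAReduxKind,
TMAStoreMode and llvm::Intrinsic::ID, and the sentinel value plays the role of
llvm::Intrinsic::not_intrinsic.

// Illustrative sketch (C++17); not the actual NVVM lowering code.
#include <array>
#include <cassert>
#include <cstdio>

enum class RedKind { Add, Min, Max, Inc, Dec, And, Or, Xor };
enum class Layout { Tile, Im2Col };

using IntrinsicID = int;                // stand-in for llvm::Intrinsic::ID
constexpr IntrinsicID NotIntrinsic = 0; // stand-in for Intrinsic::not_intrinsic

// Table indexed by [kind][layout][dims]; dims run 1..5, index 0 is unused.
using Row = std::array<IntrinsicID, 6>;
using PerKind = std::array<Row, 2>;
using Table = std::array<PerKind, 8>;

static Table buildTable() {
  Table T{};
  IntrinsicID Next = 1;
  for (auto &Kind : T)
    for (unsigned Lay = 0; Lay < 2; ++Lay)
      for (unsigned Dim = 1; Dim <= 5; ++Dim)
        // im2col variants exist only for 3D-5D, mirroring the real table.
        Kind[Lay][Dim] = (Lay == 1 && Dim < 3) ? NotIntrinsic : Next++;
  return T;
}

static IntrinsicID lookup(RedKind K, Layout L, unsigned Dims) {
  static const Table T = buildTable();
  IntrinsicID ID = T[unsigned(K)][unsigned(L)][Dims];
  assert(ID != NotIntrinsic && "no intrinsic for this (kind, layout, dims)");
  return ID;
}

int main() {
  std::printf("add/tile/1d   -> %d\n", lookup(RedKind::Add, Layout::Tile, 1));
  std::printf("xor/im2col/5d -> %d\n", lookup(RedKind::Xor, Layout::Im2Col, 5));
  return 0;
}

The real table in NVVMDialect.cpp follows the same shape: a static constexpr
array indexed by reduction kind, store mode and dimensionality, with asserts
guarding each index and with the cache-hint operand defaulting to an i64 0
instead of undef when no hint is supplied.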
---
 mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td   |  30 +--
 mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp    | 183 +++++++++++++-----
 .../Target/LLVMIR/nvvm/tma_store_reduce.mlir  | 128 ++++++------
 3 files changed, 209 insertions(+), 132 deletions(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 05ca69e404ba9..f56c1e5b936e6 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -3207,35 +3207,17 @@ def NVVM_CpAsyncBulkTensorReduceOp :
   }];
 
   let extraClassDeclaration = [{
-    static llvm::Intrinsic::ID getIntrinsicID(int tensorDims,
-                                              NVVM::TMAReduxKind kind,
-                                              bool isIm2Col);
+    static mlir::NVVM::IDArgPair
+    getIntrinsicIDAndArgs(Operation &op,
+      LLVM::ModuleTranslation &mt, llvm::IRBuilderBase& builder);
   }];
 
   let hasVerifier = 1;
 
   string llvmBuilder = [{
-    // Arguments to the intrinsic:
-    // shared_mem_ptr, tmaDesc, tensorDims
-    // cache_hint(if applicable) and flag(boolean)
-    llvm::SmallVector<llvm::Value *> translatedOperands;
-    translatedOperands.push_back($srcMem);
-    translatedOperands.push_back($tmaDescriptor);
-
-    for (auto v : op.getCoordinates())
-      translatedOperands.push_back(moduleTranslation.lookupValue(v));
-
-    llvm::LLVMContext &ctx = moduleTranslation.getLLVMContext();
-    auto *i64Undef = llvm::UndefValue::get(llvm::IntegerType::get(ctx, 64));
-
-    bool isCacheHint = op.getL2CacheHint() ? true : false;
-    translatedOperands.push_back(isCacheHint ? $l2CacheHint : i64Undef);
-    translatedOperands.push_back(builder.getInt1(isCacheHint));
-
-    auto intId = NVVM::CpAsyncBulkTensorReduceOp::getIntrinsicID(
-                 op.getCoordinates().size(), $redKind,
-                 (op.getMode() == NVVM::TMAStoreMode::IM2COL));
-    createIntrinsicCall(builder, intId, translatedOperands);
+    auto [id, args] = NVVM::CpAsyncBulkTensorReduceOp::getIntrinsicIDAndArgs(
+                      *op, moduleTranslation, builder);
+    createIntrinsicCall(builder, id, args);
   }];
 }
 
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index cc2a656ccb17f..682bf8cc102cb 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -1802,53 +1802,148 @@ CpAsyncBulkTensorSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
   return {id, std::move(args)};
 }
 
-#define CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, mode)                        \
-  llvm::Intrinsic::nvvm_cp_async_bulk_tensor_##op##_##mode##_##dim##d
+NVVM::IDArgPair CpAsyncBulkTensorReduceOp::getIntrinsicIDAndArgs(
+    Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
+  auto thisOp = cast<NVVM::CpAsyncBulkTensorReduceOp>(op);
+  llvm::LLVMContext &ctx = mt.getLLVMContext();
 
-#define CP_ASYNC_BULK_TENSOR_REDUCE(op, dim, is_im2col)                        \
-  is_im2col ? CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, im2col)                \
-            : CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, dim, tile)
+  llvm::SmallVector<llvm::Value *> args;
 
-#define GET_CP_ASYNC_BULK_TENSOR_ID(op, dims, is_im2col)                       \
-  [&]() -> auto {                                                              \
-    switch (dims) {                                                            \
-    case 1:                                                                    \
-      return CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, 1, tile);                    \
-    case 2:                                                                    \
-      return CP_ASYNC_BULK_TENSOR_REDUCE_MODE(op, 2, tile);                    \
-    case 3:                                                                    \
-      return CP_ASYNC_BULK_TENSOR_REDUCE(op, 3, is_im2col);                    \
-    case 4:                                                                    \
-      return CP_ASYNC_BULK_TENSOR_REDUCE(op, 4, is_im2col);                    \
-    case 5:                                                                    \
-      return CP_ASYNC_BULK_TENSOR_REDUCE(op, 5, is_im2col);                    \
-    default:                                                                   \
-      llvm_unreachable("Invalid TensorDim in CpAsyncBulkTensorReduceOp.");     \
-    }                                                                          \
-  }()
+  // Arguments to the intrinsic:
+  // shared_mem_ptr, tmaDesc, tensorDims
+  // cache_hint(if applicable) and flag(boolean)
+  args.push_back(mt.lookupValue(thisOp.getSrcMem()));
+  args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
+
+  for (Value v : thisOp.getCoordinates())
+    args.push_back(mt.lookupValue(v));
+
+  mlir::Value cacheHint = thisOp.getL2CacheHint();
+  const bool hasCacheHint = static_cast<bool>(cacheHint);
+  llvm::Value *i64ZeroValue =
+      llvm::ConstantInt::get(llvm::Type::getInt64Ty(ctx), 0);
+  args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64ZeroValue);
+  args.push_back(builder.getInt1(hasCacheHint));
+
+  const llvm::Intrinsic::ID notIntrinsic = llvm::Intrinsic::not_intrinsic;
+
+  constexpr unsigned numRedKinds = 8; // ADD, MIN, MAX, INC, DEC, AND, OR, XOR
+  constexpr unsigned numLayouts = 2;  // TILE, IM2COL
+  constexpr unsigned maxDim = 5;      // 1D to 5D
+  using row = std::array<llvm::Intrinsic::ID, maxDim + 1>;
+  using layoutTable = std::array<row, numLayouts>;
+  using fullTable = std::array<layoutTable, numRedKinds>;
+  static constexpr fullTable IDTable{
+      {// RedTy::ADD
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_5d}}}},
+       // RedTy::MIN
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_5d}}}},
+       // RedTy::MAX
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_5d}}}},
+       // RedTy::INC
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_5d}}}},
+       // RedTy::DEC
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_5d}}}},
+       // RedTy::AND
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_5d}}}},
+       // RedTy::OR
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_5d}}}},
+       // RedTy::XOR
+       {{{{notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_1d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_2d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_4d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_5d}},
+         {{notIntrinsic, notIntrinsic, notIntrinsic,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_3d,
+           llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_4d,
+           llvm::Intrinsic::
+               nvvm_cp_async_bulk_tensor_reduce_xor_im2col_5d}}}}}};
+
+  static_assert(getMaxEnumValForTMAReduxKind() == std::size(IDTable) - 1,
+                "TMAReduxKinds must match number of rows in IDTable");
+
+  size_t redKind = static_cast<size_t>(thisOp.getRedKind());
+  size_t mode = static_cast<size_t>(thisOp.getMode());
+  size_t dim = thisOp.getCoordinates().size();
+
+  assert(redKind < IDTable.size() &&
+         "Invalid redKind for CpAsyncBulkTensorReduceOp");
+  assert(mode < IDTable[redKind].size() &&
+         "Invalid mode for CpAsyncBulkTensorReduceOp");
+  assert(dim < IDTable[redKind][mode].size() &&
+         "Invalid dim for CpAsyncBulkTensorReduceOp");
+
+  llvm::Intrinsic::ID intrinsicID = IDTable[redKind][mode][dim];
+
+  assert(intrinsicID != notIntrinsic &&
+         "Invalid intrinsic for CpAsyncBulkTensorReduceOp.");
 
-llvm::Intrinsic::ID CpAsyncBulkTensorReduceOp::getIntrinsicID(
-    int tensorDims, NVVM::TMAReduxKind kind, bool isIm2Col) {
-  using RedTy = NVVM::TMAReduxKind;
-  switch (kind) {
-  case RedTy::ADD:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_add, tensorDims, isIm2Col);
-  case RedTy::MIN:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_min, tensorDims, isIm2Col);
-  case RedTy::MAX:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_max, tensorDims, isIm2Col);
-  case RedTy::INC:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_inc, tensorDims, isIm2Col);
-  case RedTy::DEC:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_dec, tensorDims, isIm2Col);
-  case RedTy::AND:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_and, tensorDims, isIm2Col);
-  case RedTy::OR:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_or, tensorDims, isIm2Col);
-  case RedTy::XOR:
-    return GET_CP_ASYNC_BULK_TENSOR_ID(reduce_xor, tensorDims, isIm2Col);
-  }
-  llvm_unreachable("Invalid Reduction Op for CpAsyncBulkTensorReduceOp");
+  return {intrinsicID, std::move(args)};
 }
 
 #define _none
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce.mlir
index 6e0b48489e8b0..2231f1dabd504 100644
--- a/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce.mlir
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_store_reduce.mlir
@@ -19,14 +19,14 @@ llvm.func @tma_store_reduce_1d(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %d0 :
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.1d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] {redKind = #nvvm.tma_redux_kind<add>, mode = #nvvm.tma_store_mode<tile>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] {redKind = #nvvm.tma_redux_kind<min>, mode = #nvvm.tma_store_mode<tile>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0] {redKind = #nvvm.tma_redux_kind<max>, mode = #nvvm.tma_store_mode<tile>} : !llvm.ptr, !llvm.ptr<3>
@@ -59,14 +59,14 @@ llvm.func @tma_store_reduce_2d(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr, %d0 :
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.2d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] {redKind = #nvvm.tma_redux_kind<add>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] {redKind = #nvvm.tma_redux_kind<min>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1] {redKind = #nvvm.tma_redux_kind<max>} : !llvm.ptr, !llvm.ptr<3>
@@ -99,14 +99,14 @@ llvm.func @tma_store_reduce_3d_tile(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr,
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<add>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<min>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<max>} : !llvm.ptr, !llvm.ptr<3>
@@ -137,14 +137,14 @@ llvm.func @tma_store_reduce_3d_im2col(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>, mode = #nvvm.tma_store_mode<im2col>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.3d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<add>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<min>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2] {redKind = #nvvm.tma_redux_kind<max>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
@@ -177,14 +177,14 @@ llvm.func @tma_store_reduce_4d_tile(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr,
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<add>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<min>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<max>} : !llvm.ptr, !llvm.ptr<3>
@@ -215,14 +215,14 @@ llvm.func @tma_store_reduce_4d_im2col(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>, mode = #nvvm.tma_store_mode<im2col>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.4d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<add>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<min>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3] {redKind = #nvvm.tma_redux_kind<max>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
@@ -255,14 +255,14 @@ llvm.func @tma_store_reduce_5d_tile(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr,
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.tile.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<add>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<min>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<max>} : !llvm.ptr, !llvm.ptr<3>
@@ -293,14 +293,14 @@ llvm.func @tma_store_reduce_5d_im2col(%src : !llvm.ptr<3>, %tma_desc : !llvm.ptr
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<or>, mode = #nvvm.tma_store_mode<im2col>}  : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] l2_cache_hint = %ch {redKind = #nvvm.tma_redux_kind<xor>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
 
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
-  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 undef, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.add.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.min.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.max.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.inc.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.dec.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.and.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.or.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
+  // CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.reduce.xor.im2col.5d(ptr addrspace(3) %[[SRC]], ptr %[[DST]], i32 %[[D0]], i32 %[[D1]], i32 %[[D2]], i32 %[[D3]], i32 %[[D4]], i64 0, i1 false)
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<add>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<min>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>
   nvvm.cp.async.bulk.tensor.reduce %tma_desc, %src, box[%d0, %d1, %d2, %d3, %d4] {redKind = #nvvm.tma_redux_kind<max>, mode = #nvvm.tma_store_mode<im2col>} : !llvm.ptr, !llvm.ptr<3>

>From 831cf94d1b3935b031ff0d42b9c2745103c50189 Mon Sep 17 00:00:00 2001
From: Jinjie Huang <huangjinjie at bytedance.com>
Date: Tue, 23 Sep 2025 16:31:03 +0800
Subject: [PATCH 20/42] [BOLT][DWARF] Skip processing DWARF CUs with a DWO ID
 but no DWO name (#154749)

This patch skips processing DWARF CUs that have a DWO ID but no DWO name
and ensures they are not included in the final binary.
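
For context, a minimal sketch (not the BOLT sources verbatim) of the
condition used to classify such a CU as broken; the real check is the new
BinaryContext::isValidDwarfUnit() in the diff below, and the helper name
here is hypothetical:

  #include "llvm/BinaryFormat/Dwarf.h"
  #include "llvm/DebugInfo/DWARF/DWARFUnit.h"
  using namespace llvm;

  // Hypothetical helper restating the new check: a non-DWO unit that
  // carries a DWOId but has neither DW_AT_dwo_name nor DW_AT_GNU_dwo_name
  // is treated as broken and skipped.
  static bool isBrokenSkeletonCU(DWARFUnit &DU) {
    return DU.getDWOId() && !DU.isDWOUnit() &&
           !DU.getUnitDIE().find(
               {dwarf::DW_AT_dwo_name, dwarf::DW_AT_GNU_dwo_name});
  }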
---
 bolt/include/bolt/Core/BinaryContext.h  |   3 +
 bolt/include/bolt/Core/DIEBuilder.h     |   3 +-
 bolt/lib/Core/BinaryContext.cpp         |  16 +
 bolt/lib/Core/DIEBuilder.cpp            |  10 +-
 bolt/test/X86/dwarf5-dwoid-no-dwoname.s | 629 ++++++++++++++++++++++++
 5 files changed, 657 insertions(+), 4 deletions(-)
 create mode 100644 bolt/test/X86/dwarf5-dwoid-no-dwoname.s

diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 72c8817daa714..082f1cec34d52 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -326,6 +326,9 @@ class BinaryContext {
   /// Returns true if DWARF4 or lower is used.
   bool isDWARFLegacyUsed() const { return ContainsDwarfLegacy; }
 
+  /// Returns true if DWARFUnit is valid.
+  bool isValidDwarfUnit(DWARFUnit &DU) const;
+
   std::map<unsigned, DwarfLineTable> &getDwarfLineTables() {
     return DwarfLineTablesCUMap;
   }
diff --git a/bolt/include/bolt/Core/DIEBuilder.h b/bolt/include/bolt/Core/DIEBuilder.h
index e4a4fc6b2f258..4c3c277adf422 100644
--- a/bolt/include/bolt/Core/DIEBuilder.h
+++ b/bolt/include/bolt/Core/DIEBuilder.h
@@ -217,7 +217,8 @@ class DIEBuilder {
                                std::optional<BOLTDWARF5AccelTableData *> Parent,
                                uint32_t NumberParentsInChain);
 
-  void registerUnit(DWARFUnit &DU, bool NeedSort);
+  /// Returns true if DWARFUnit is registered successfully.
+  bool registerUnit(DWARFUnit &DU, bool NeedSort);
 
   /// \return the unique ID of \p U if it exists.
   std::optional<uint32_t> getUnitId(const DWARFUnit &DU);
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index 72c72bbaf4a65..98440cde7cebd 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -1624,10 +1624,26 @@ DWARFContext *BinaryContext::getDWOContext() const {
   return &DWOCUs.begin()->second->getContext();
 }
 
+bool BinaryContext::isValidDwarfUnit(DWARFUnit &DU) const {
+  // Invalid DWARF unit with a DWOId but lacking a dwo_name.
+  if (DU.getDWOId() && !DU.isDWOUnit() &&
+      !DU.getUnitDIE().find(
+          {dwarf::DW_AT_dwo_name, dwarf::DW_AT_GNU_dwo_name})) {
+    this->outs() << "BOLT-ERROR: broken DWARF found in CU at offset 0x"
+                 << Twine::utohexstr(DU.getOffset()) << " (DWOId=0x"
+                 << Twine::utohexstr(*(DU.getDWOId()))
+                 << ", missing DW_AT_dwo_name / DW_AT_GNU_dwo_name)\n";
+    return false;
+  }
+  return true;
+}
+
 /// Handles DWO sections that can either be in .o, .dwo or .dwp files.
 void BinaryContext::preprocessDWODebugInfo() {
   for (const std::unique_ptr<DWARFUnit> &CU : DwCtx->compile_units()) {
     DWARFUnit *const DwarfUnit = CU.get();
+    if (!isValidDwarfUnit(*DwarfUnit))
+      continue;
     if (std::optional<uint64_t> DWOId = DwarfUnit->getDWOId()) {
       std::string DWOName = dwarf::toString(
           DwarfUnit->getUnitDIE().find(
diff --git a/bolt/lib/Core/DIEBuilder.cpp b/bolt/lib/Core/DIEBuilder.cpp
index b041dc5ea1cce..7ce55f9165136 100644
--- a/bolt/lib/Core/DIEBuilder.cpp
+++ b/bolt/lib/Core/DIEBuilder.cpp
@@ -584,7 +584,8 @@ DWARFDie DIEBuilder::resolveDIEReference(
   if ((RefCU =
            getUnitForOffset(*this, *DwarfContext, TmpRefOffset, AttrSpec))) {
     /// Trying to add to current working set in case it's cross CU reference.
-    registerUnit(*RefCU, true);
+    if (!registerUnit(*RefCU, true))
+      return DWARFDie();
     DWARFDataExtractor DebugInfoData = RefCU->getDebugInfoExtractor();
     if (DwarfDebugInfoEntry.extractFast(*RefCU, &TmpRefOffset, DebugInfoData,
                                         RefCU->getNextUnitOffset(), 0)) {
@@ -1008,12 +1009,14 @@ static uint64_t getHash(const DWARFUnit &DU) {
   return DU.getOffset();
 }
 
-void DIEBuilder::registerUnit(DWARFUnit &DU, bool NeedSort) {
+bool DIEBuilder::registerUnit(DWARFUnit &DU, bool NeedSort) {
+  if (!BC.isValidDwarfUnit(DU))
+    return false;
   auto IterGlobal = AllProcessed.insert(getHash(DU));
   // If DU is already in a current working set or was already processed we can
   // skip it.
   if (!IterGlobal.second)
-    return;
+    return true;
   if (getState().Type == ProcessingType::DWARF4TUs) {
     getState().DWARF4TUVector.push_back(&DU);
   } else if (getState().Type == ProcessingType::DWARF5TUs) {
@@ -1034,6 +1037,7 @@ void DIEBuilder::registerUnit(DWARFUnit &DU, bool NeedSort) {
   if (getState().DUList.size() == getState().CloneUnitCtxMap.size())
     getState().CloneUnitCtxMap.emplace_back();
   getState().DUList.push_back(&DU);
+  return true;
 }
 
 std::optional<uint32_t> DIEBuilder::getUnitId(const DWARFUnit &DU) {
diff --git a/bolt/test/X86/dwarf5-dwoid-no-dwoname.s b/bolt/test/X86/dwarf5-dwoid-no-dwoname.s
new file mode 100644
index 0000000000000..415d0b8f987e6
--- /dev/null
+++ b/bolt/test/X86/dwarf5-dwoid-no-dwoname.s
@@ -0,0 +1,629 @@
+## Check that a DWARF CU with a valid DWOId but missing a dwo_name is correctly detected.
+# RUN: rm -rf %t && mkdir -p %t && cd %t
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -split-dwarf-file=main.dwo -o main.o
+# RUN: %clang -O3 -g -gdwarf-5 -gsplit-dwarf -Wl,-q %t/main.o -o main.exe
+# RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections  2>&1 | FileCheck %s --check-prefix=PRECHECK
+# PRECHECK: BOLT-ERROR: broken DWARF found in CU at offset 0x3e (DWOId=0x0, missing DW_AT_dwo_name / DW_AT_GNU_dwo_name)
+
+## Check that the broken DWARF CU is removed.
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t/main.exe.bolt | FileCheck %s --check-prefix=POSTCHECK
+# POSTCHECK-LABEL: .debug_info contents:
+# POSTCHECK: DW_TAG_skeleton_unit
+# POSTCHECK-DAG: DW_AT_dwo_name{{.*=.*\.dwo.*}}
+# POSTCHECK: NULL
+# POSTCHECK-NOT: DW_TAG_skeleton_unit
+
+	.text
+	.file	"main.cpp"
+	.section	.rodata.cst16,"aM",@progbits,16
+.LCPI0_0:
+.LCPI0_1:
+.LCPI0_2:
+.LCPI0_3:
+.LCPI0_4:
+.LCPI0_5:
+.LCPI0_6:
+.LCPI0_7:
+.LCPI0_8:
+.LCPI0_9:
+.LCPI0_10:
+	.text
+	.globl	main
+	.type	main,@function
+main:                                   # @main
+.Lfunc_begin0:
+	.file	1 "." "main.cpp" md5 0x8a68374187457ce14ac0c6c2121349a2
+	.loc	1 5 0                           # main.cpp:5:0
+# %bb.0:                                # %vector.ph
+.Ltmp0:
+.Ltmp1:
+.LBB0_1:                                # %vector.body
+.Ltmp2:
+	.file	2 "." "callee.cpp" md5 0x86e19c24983503540b9bb1a6f7bad737
+	.loc	2 8 15 prologue_end             # callee.cpp:8:15
+.Ltmp3:
+	.loc	2 3 15                          # callee.cpp:3:15
+.Ltmp4:
+	.loc	2 8 15                          # callee.cpp:8:15
+.Ltmp5:
+	.loc	2 9 19                          # callee.cpp:9:19
+.Ltmp6:
+	.loc	2 9 13 is_stmt 0                # callee.cpp:9:13
+.Ltmp7:
+	.loc	2 3 15 is_stmt 1                # callee.cpp:3:15
+	.loc	2 3 19 is_stmt 0                # callee.cpp:3:19
+.Ltmp8:
+	.loc	2 4 19 is_stmt 1                # callee.cpp:4:19
+.Ltmp9:
+	.loc	2 4 13 is_stmt 0                # callee.cpp:4:13
+.Ltmp10:
+	.loc	2 4 19                          # callee.cpp:4:19
+.Ltmp11:
+	.loc	2 4 13                          # callee.cpp:4:13
+.Ltmp12:
+	.loc	2 2 12 is_stmt 1                # callee.cpp:2:12
+	.loc	2 2 17 is_stmt 0                # callee.cpp:2:17
+.Ltmp13:
+	.loc	2 4 13 is_stmt 1                # callee.cpp:4:13
+.Ltmp14:
+	.loc	2 0 0 is_stmt 0                 # callee.cpp:0:0
+.Ltmp15:
+	.loc	1 8 13 is_stmt 1                # main.cpp:8:13
+.Ltmp16:
+	.loc	2 0 0 is_stmt 0                 # callee.cpp:0:0
+.Ltmp17:
+	.loc	1 8 13                          # main.cpp:8:13
+.Ltmp18:
+	.loc	1 7 35 is_stmt 1                # main.cpp:7:35
+.Ltmp19:
+# %bb.2:                                # %middle.block
+	.loc	1 7 5 is_stmt 0                 # main.cpp:7:5
+.Ltmp20:
+	.loc	1 11 9 is_stmt 1                # main.cpp:11:9
+.Ltmp21:
+	.loc	1 15 1                          # main.cpp:15:1
+	retq
+.Ltmp22:
+.Lfunc_end0:
+	.size	main, .Lfunc_end0-main
+	.section	.debug_abbrev,"",@progbits
+	.byte	1                               # Abbreviation Code
+	.byte	74                              # DW_TAG_skeleton_unit
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	16                              # DW_AT_stmt_list
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	114                             # DW_AT_str_offsets_base
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	27                              # DW_AT_comp_dir
+	.byte	37                              # DW_FORM_strx1
+	.ascii	"\264B"                         # DW_AT_GNU_pubnames
+	.byte	25                              # DW_FORM_flag_present
+	.byte	118                             # DW_AT_dwo_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	17                              # DW_AT_low_pc
+	.byte	27                              # DW_FORM_addrx
+	.byte	18                              # DW_AT_high_pc
+	.byte	6                               # DW_FORM_data4
+	.byte	115                             # DW_AT_addr_base
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	116                             # DW_AT_rnglists_base
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	2                               # Abbreviation Code
+	.byte	46                              # DW_TAG_subprogram
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	17                              # DW_AT_low_pc
+	.byte	27                              # DW_FORM_addrx
+	.byte	18                              # DW_AT_high_pc
+	.byte	6                               # DW_FORM_data4
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	3                               # Abbreviation Code
+	.byte	29                              # DW_TAG_inlined_subroutine
+	.byte	0                               # DW_CHILDREN_no
+	.byte	49                              # DW_AT_abstract_origin
+	.byte	16                              # DW_FORM_ref_addr
+	.byte	85                              # DW_AT_ranges
+	.byte	35                              # DW_FORM_rnglistx
+	.byte	88                              # DW_AT_call_file
+	.byte	11                              # DW_FORM_data1
+	.byte	89                              # DW_AT_call_line
+	.byte	11                              # DW_FORM_data1
+	.byte	87                              # DW_AT_call_column
+	.byte	11                              # DW_FORM_data1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	4                               # Abbreviation Code
+	.byte	74                              # DW_TAG_skeleton_unit
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	16                              # DW_AT_stmt_list
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	114                             # DW_AT_str_offsets_base
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	27                              # DW_AT_comp_dir
+	.byte	37                              # DW_FORM_strx1
+	.ascii	"\264B"                         # DW_AT_GNU_pubnames
+	.byte	25                              # DW_FORM_flag_present
+	.byte	37                              # DW_AT_producer
+	.byte	37                              # DW_FORM_strx1
+	.byte	19                              # DW_AT_language
+	.byte	5                               # DW_FORM_data2
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	115                             # DW_AT_addr_base
+	.byte	23                              # DW_FORM_sec_offset
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	5                               # Abbreviation Code
+	.byte	46                              # DW_TAG_subprogram
+	.byte	0                               # DW_CHILDREN_no
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	32                              # DW_AT_inline
+	.byte	33                              # DW_FORM_implicit_const
+	.byte	1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	0                               # EOM(3)
+	.section	.debug_info,"",@progbits
+.Lcu_begin0:
+	.long	.Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+	.short	5                               # DWARF version number
+	.byte	4                               # DWARF Unit Type
+	.byte	8                               # Address Size (in bytes)
+	.long	.debug_abbrev                   # Offset Into Abbrev. Section
+	.quad	-1861901018463438211
+	.byte	1                               # Abbrev [1] 0x14:0x2a DW_TAG_skeleton_unit
+	.long	.Lline_table_start0             # DW_AT_stmt_list
+	.long	.Lstr_offsets_base0             # DW_AT_str_offsets_base
+	.byte	0                               # DW_AT_comp_dir
+                                        # DW_AT_GNU_pubnames
+	.byte	3                               # DW_AT_dwo_name
+	.byte	0                               # DW_AT_low_pc
+	.long	.Lfunc_end0-.Lfunc_begin0       # DW_AT_high_pc
+	.long	.Laddr_table_base0              # DW_AT_addr_base
+	.long	.Lrnglists_table_base0          # DW_AT_rnglists_base
+	.byte	2                               # Abbrev [2] 0x2c:0x11 DW_TAG_subprogram
+	.byte	0                               # DW_AT_low_pc
+	.long	.Lfunc_end0-.Lfunc_begin0       # DW_AT_high_pc
+	.byte	2                               # DW_AT_name
+	.byte	3                               # Abbrev [3] 0x33:0x9 DW_TAG_inlined_subroutine
+	.long	.debug_info+100                 # DW_AT_abstract_origin
+	.byte	0                               # DW_AT_ranges
+	.byte	1                               # DW_AT_call_file
+	.byte	8                               # DW_AT_call_line
+	.byte	16                              # DW_AT_call_column
+	.byte	0                               # End Of Children Mark
+	.byte	0                               # End Of Children Mark
+.Ldebug_info_end0:
+.Lcu_begin1:
+	.long	.Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+	.short	5                               # DWARF version number
+	.byte	4                               # DWARF Unit Type
+	.byte	8                               # Address Size (in bytes)
+	.long	.debug_abbrev                   # Offset Into Abbrev. Section
+	.quad	0
+	.byte	4                               # Abbrev [4] 0x14:0x15 DW_TAG_skeleton_unit
+	.long	.Lline_table_start0             # DW_AT_stmt_list
+	.long	.Lstr_offsets_base0             # DW_AT_str_offsets_base
+	.byte	0                               # DW_AT_comp_dir
+                                        # DW_AT_GNU_pubnames
+	.byte	4                               # DW_AT_producer
+	.short	33                              # DW_AT_language
+	.byte	5                               # DW_AT_name
+	.long	.Laddr_table_base0              # DW_AT_addr_base
+	.byte	5                               # Abbrev [5] 0x26:0x2 DW_TAG_subprogram
+	.byte	1                               # DW_AT_name
+                                        # DW_AT_inline
+	.byte	0                               # End Of Children Mark
+.Ldebug_info_end1:
+	.section	.debug_rnglists,"",@progbits
+	.long	.Ldebug_list_header_end0-.Ldebug_list_header_start0 # Length
+.Ldebug_list_header_start0:
+	.short	5                               # Version
+	.byte	8                               # Address size
+	.byte	0                               # Segment selector size
+	.long	1                               # Offset entry count
+.Lrnglists_table_base0:
+	.long	.Ldebug_ranges1-.Lrnglists_table_base0
+.Ldebug_ranges1:
+	.byte	4                               # DW_RLE_offset_pair
+	.uleb128 .Ltmp2-.Lfunc_begin0           #   starting offset
+	.uleb128 .Ltmp15-.Lfunc_begin0          #   ending offset
+	.byte	4                               # DW_RLE_offset_pair
+	.uleb128 .Ltmp16-.Lfunc_begin0          #   starting offset
+	.uleb128 .Ltmp17-.Lfunc_begin0          #   ending offset
+	.byte	0                               # DW_RLE_end_of_list
+.Ldebug_list_header_end0:
+	.section	.debug_str_offsets,"",@progbits
+	.long	28                              # Length of String Offsets Set
+	.short	5
+	.short	0
+.Lstr_offsets_base0:
+	.section	.debug_str,"MS",@progbits,1
+.Lskel_string0:
+	.asciz	"." # string offset=0
+.Lskel_string1:
+	.asciz	"hotFunction"                   # string offset=45
+.Lskel_string2:
+	.asciz	"main"                          # string offset=57
+.Lskel_string3:
+	.asciz	"main.dwo"                      # string offset=62
+.Lskel_string4:
+	.asciz	"clang version 16.0.6" # string offset=71
+.Lskel_string5:
+	.asciz	"callee.cpp"                    # string offset=177
+	.section	.debug_str_offsets,"",@progbits
+	.long	.Lskel_string0
+	.long	.Lskel_string1
+	.long	.Lskel_string2
+	.long	.Lskel_string3
+	.long	.Lskel_string4
+	.long	.Lskel_string5
+	.section	.debug_str_offsets.dwo,"e",@progbits
+	.long	56                              # Length of String Offsets Set
+	.short	5
+	.short	0
+	.section	.debug_str.dwo,"eMS",@progbits,1
+.Linfo_string0:
+	.asciz	"_Z11hotFunctioni"              # string offset=0
+.Linfo_string1:
+	.asciz	"hotFunction"                   # string offset=17
+.Linfo_string2:
+	.asciz	"int"                           # string offset=29
+.Linfo_string3:
+	.asciz	"x"                             # string offset=33
+.Linfo_string4:
+	.asciz	"main"                          # string offset=35
+.Linfo_string5:
+	.asciz	"argc"                          # string offset=40
+.Linfo_string6:
+	.asciz	"argv"                          # string offset=45
+.Linfo_string7:
+	.asciz	"char"                          # string offset=50
+.Linfo_string8:
+	.asciz	"sum"                           # string offset=55
+.Linfo_string9:
+	.asciz	"i"                             # string offset=59
+.Linfo_string10:
+	.asciz	"clang version 16.0.6" # string offset=61
+.Linfo_string11:
+	.asciz	"main.cpp"                      # string offset=167
+.Linfo_string12:
+	.asciz	"main.dwo"                      # string offset=176
+	.section	.debug_str_offsets.dwo,"e",@progbits
+	.long	0
+	.long	17
+	.long	29
+	.long	33
+	.long	35
+	.long	40
+	.long	45
+	.long	50
+	.long	55
+	.long	59
+	.long	61
+	.long	167
+	.long	176
+	.section	.debug_info.dwo,"e",@progbits
+	.long	.Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 # Length of Unit
+.Ldebug_info_dwo_start0:
+	.short	5                               # DWARF version number
+	.byte	5                               # DWARF Unit Type
+	.byte	8                               # Address Size (in bytes)
+	.long	0                               # Offset Into Abbrev. Section
+	.quad	-1861901018463438211
+	.byte	1                               # Abbrev [1] 0x14:0x71 DW_TAG_compile_unit
+	.byte	10                              # DW_AT_producer
+	.short	33                              # DW_AT_language
+	.byte	11                              # DW_AT_name
+	.byte	12                              # DW_AT_dwo_name
+	.byte	2                               # Abbrev [2] 0x1a:0x12 DW_TAG_subprogram
+	.byte	0                               # DW_AT_linkage_name
+	.byte	1                               # DW_AT_name
+	.byte	2                               # DW_AT_decl_file
+	.byte	1                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+                                        # DW_AT_external
+                                        # DW_AT_inline
+	.byte	3                               # Abbrev [3] 0x23:0x8 DW_TAG_formal_parameter
+	.byte	3                               # DW_AT_name
+	.byte	2                               # DW_AT_decl_file
+	.byte	1                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+	.byte	0                               # End Of Children Mark
+	.byte	4                               # Abbrev [4] 0x2c:0x4 DW_TAG_base_type
+	.byte	2                               # DW_AT_name
+	.byte	5                               # DW_AT_encoding
+	.byte	4                               # DW_AT_byte_size
+	.byte	5                               # Abbrev [5] 0x30:0x46 DW_TAG_subprogram
+	.byte	0                               # DW_AT_low_pc
+	.long	.Lfunc_end0-.Lfunc_begin0       # DW_AT_high_pc
+	.byte	1                               # DW_AT_frame_base
+	.byte	87
+                                        # DW_AT_call_all_calls
+	.byte	4                               # DW_AT_name
+	.byte	1                               # DW_AT_decl_file
+	.byte	5                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+                                        # DW_AT_external
+	.byte	6                               # Abbrev [6] 0x3f:0xa DW_TAG_formal_parameter
+	.byte	1                               # DW_AT_location
+	.byte	85
+	.byte	5                               # DW_AT_name
+	.byte	1                               # DW_AT_decl_file
+	.byte	5                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+	.byte	6                               # Abbrev [6] 0x49:0xa DW_TAG_formal_parameter
+	.byte	1                               # DW_AT_location
+	.byte	84
+	.byte	6                               # DW_AT_name
+	.byte	1                               # DW_AT_decl_file
+	.byte	5                               # DW_AT_decl_line
+	.long	118                             # DW_AT_type
+	.byte	7                               # Abbrev [7] 0x53:0x9 DW_TAG_variable
+	.byte	0                               # DW_AT_const_value
+	.byte	8                               # DW_AT_name
+	.byte	1                               # DW_AT_decl_file
+	.byte	6                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+	.byte	8                               # Abbrev [8] 0x5c:0x19 DW_TAG_lexical_block
+	.byte	1                               # DW_AT_low_pc
+	.long	.Ltmp20-.Ltmp2                  # DW_AT_high_pc
+	.byte	7                               # Abbrev [7] 0x62:0x9 DW_TAG_variable
+	.byte	0                               # DW_AT_const_value
+	.byte	9                               # DW_AT_name
+	.byte	1                               # DW_AT_decl_file
+	.byte	7                               # DW_AT_decl_line
+	.long	44                              # DW_AT_type
+	.byte	9                               # Abbrev [9] 0x6b:0x9 DW_TAG_inlined_subroutine
+	.long	26                              # DW_AT_abstract_origin
+	.byte	0                               # DW_AT_ranges
+	.byte	1                               # DW_AT_call_file
+	.byte	8                               # DW_AT_call_line
+	.byte	16                              # DW_AT_call_column
+	.byte	0                               # End Of Children Mark
+	.byte	0                               # End Of Children Mark
+	.byte	10                              # Abbrev [10] 0x76:0x5 DW_TAG_pointer_type
+	.long	123                             # DW_AT_type
+	.byte	10                              # Abbrev [10] 0x7b:0x5 DW_TAG_pointer_type
+	.long	128                             # DW_AT_type
+	.byte	4                               # Abbrev [4] 0x80:0x4 DW_TAG_base_type
+	.byte	7                               # DW_AT_name
+	.byte	6                               # DW_AT_encoding
+	.byte	1                               # DW_AT_byte_size
+	.byte	0                               # End Of Children Mark
+.Ldebug_info_dwo_end0:
+	.section	.debug_abbrev.dwo,"e",@progbits
+	.byte	1                               # Abbreviation Code
+	.byte	17                              # DW_TAG_compile_unit
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	37                              # DW_AT_producer
+	.byte	37                              # DW_FORM_strx1
+	.byte	19                              # DW_AT_language
+	.byte	5                               # DW_FORM_data2
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	118                             # DW_AT_dwo_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	2                               # Abbreviation Code
+	.byte	46                              # DW_TAG_subprogram
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	110                             # DW_AT_linkage_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	58                              # DW_AT_decl_file
+	.byte	11                              # DW_FORM_data1
+	.byte	59                              # DW_AT_decl_line
+	.byte	11                              # DW_FORM_data1
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	63                              # DW_AT_external
+	.byte	25                              # DW_FORM_flag_present
+	.byte	32                              # DW_AT_inline
+	.byte	33                              # DW_FORM_implicit_const
+	.byte	1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	3                               # Abbreviation Code
+	.byte	5                               # DW_TAG_formal_parameter
+	.byte	0                               # DW_CHILDREN_no
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	58                              # DW_AT_decl_file
+	.byte	11                              # DW_FORM_data1
+	.byte	59                              # DW_AT_decl_line
+	.byte	11                              # DW_FORM_data1
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	4                               # Abbreviation Code
+	.byte	36                              # DW_TAG_base_type
+	.byte	0                               # DW_CHILDREN_no
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	62                              # DW_AT_encoding
+	.byte	11                              # DW_FORM_data1
+	.byte	11                              # DW_AT_byte_size
+	.byte	11                              # DW_FORM_data1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	5                               # Abbreviation Code
+	.byte	46                              # DW_TAG_subprogram
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	17                              # DW_AT_low_pc
+	.byte	27                              # DW_FORM_addrx
+	.byte	18                              # DW_AT_high_pc
+	.byte	6                               # DW_FORM_data4
+	.byte	64                              # DW_AT_frame_base
+	.byte	24                              # DW_FORM_exprloc
+	.byte	122                             # DW_AT_call_all_calls
+	.byte	25                              # DW_FORM_flag_present
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	58                              # DW_AT_decl_file
+	.byte	11                              # DW_FORM_data1
+	.byte	59                              # DW_AT_decl_line
+	.byte	11                              # DW_FORM_data1
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	63                              # DW_AT_external
+	.byte	25                              # DW_FORM_flag_present
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	6                               # Abbreviation Code
+	.byte	5                               # DW_TAG_formal_parameter
+	.byte	0                               # DW_CHILDREN_no
+	.byte	2                               # DW_AT_location
+	.byte	24                              # DW_FORM_exprloc
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	58                              # DW_AT_decl_file
+	.byte	11                              # DW_FORM_data1
+	.byte	59                              # DW_AT_decl_line
+	.byte	11                              # DW_FORM_data1
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	7                               # Abbreviation Code
+	.byte	52                              # DW_TAG_variable
+	.byte	0                               # DW_CHILDREN_no
+	.byte	28                              # DW_AT_const_value
+	.byte	13                              # DW_FORM_sdata
+	.byte	3                               # DW_AT_name
+	.byte	37                              # DW_FORM_strx1
+	.byte	58                              # DW_AT_decl_file
+	.byte	11                              # DW_FORM_data1
+	.byte	59                              # DW_AT_decl_line
+	.byte	11                              # DW_FORM_data1
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	8                               # Abbreviation Code
+	.byte	11                              # DW_TAG_lexical_block
+	.byte	1                               # DW_CHILDREN_yes
+	.byte	17                              # DW_AT_low_pc
+	.byte	27                              # DW_FORM_addrx
+	.byte	18                              # DW_AT_high_pc
+	.byte	6                               # DW_FORM_data4
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	9                               # Abbreviation Code
+	.byte	29                              # DW_TAG_inlined_subroutine
+	.byte	0                               # DW_CHILDREN_no
+	.byte	49                              # DW_AT_abstract_origin
+	.byte	19                              # DW_FORM_ref4
+	.byte	85                              # DW_AT_ranges
+	.byte	35                              # DW_FORM_rnglistx
+	.byte	88                              # DW_AT_call_file
+	.byte	11                              # DW_FORM_data1
+	.byte	89                              # DW_AT_call_line
+	.byte	11                              # DW_FORM_data1
+	.byte	87                              # DW_AT_call_column
+	.byte	11                              # DW_FORM_data1
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	10                              # Abbreviation Code
+	.byte	15                              # DW_TAG_pointer_type
+	.byte	0                               # DW_CHILDREN_no
+	.byte	73                              # DW_AT_type
+	.byte	19                              # DW_FORM_ref4
+	.byte	0                               # EOM(1)
+	.byte	0                               # EOM(2)
+	.byte	0                               # EOM(3)
+	.section	.debug_rnglists.dwo,"e",@progbits
+	.long	.Ldebug_list_header_end1-.Ldebug_list_header_start1 # Length
+.Ldebug_list_header_start1:
+	.short	5                               # Version
+	.byte	8                               # Address size
+	.byte	0                               # Segment selector size
+	.long	1                               # Offset entry count
+.Lrnglists_dwo_table_base0:
+	.long	.Ldebug_ranges0-.Lrnglists_dwo_table_base0
+.Ldebug_ranges0:
+	.byte	4                               # DW_RLE_offset_pair
+	.uleb128 .Ltmp2-.Lfunc_begin0           #   starting offset
+	.uleb128 .Ltmp15-.Lfunc_begin0          #   ending offset
+	.byte	4                               # DW_RLE_offset_pair
+	.uleb128 .Ltmp16-.Lfunc_begin0          #   starting offset
+	.uleb128 .Ltmp17-.Lfunc_begin0          #   ending offset
+	.byte	0                               # DW_RLE_end_of_list
+.Ldebug_list_header_end1:
+	.section	.debug_addr,"",@progbits
+	.long	.Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+	.short	5                               # DWARF version number
+	.byte	8                               # Address size
+	.byte	0                               # Segment selector size
+.Laddr_table_base0:
+	.quad	.Lfunc_begin0
+	.quad	.Ltmp2
+.Ldebug_addr_end0:
+	.section	.debug_gnu_pubnames,"",@progbits
+	.long	.LpubNames_end0-.LpubNames_start0 # Length of Public Names Info
+.LpubNames_start0:
+	.short	2                               # DWARF Version
+	.long	.Lcu_begin0                     # Offset of Compilation Unit Info
+	.long	62                              # Compilation Unit Length
+	.long	26                              # DIE offset
+	.byte	48                              # Attributes: FUNCTION, EXTERNAL
+	.asciz	"hotFunction"                   # External Name
+	.long	48                              # DIE offset
+	.byte	48                              # Attributes: FUNCTION, EXTERNAL
+	.asciz	"main"                          # External Name
+	.long	0                               # End Mark
+.LpubNames_end0:
+	.section	.debug_gnu_pubtypes,"",@progbits
+	.long	.LpubTypes_end0-.LpubTypes_start0 # Length of Public Types Info
+.LpubTypes_start0:
+	.short	2                               # DWARF Version
+	.long	.Lcu_begin0                     # Offset of Compilation Unit Info
+	.long	62                              # Compilation Unit Length
+	.long	44                              # DIE offset
+	.byte	144                             # Attributes: TYPE, STATIC
+	.asciz	"int"                           # External Name
+	.long	128                             # DIE offset
+	.byte	144                             # Attributes: TYPE, STATIC
+	.asciz	"char"                          # External Name
+	.long	0                               # End Mark
+.LpubTypes_end0:
+	.section	.debug_gnu_pubnames,"",@progbits
+	.long	.LpubNames_end1-.LpubNames_start1 # Length of Public Names Info
+.LpubNames_start1:
+	.short	2                               # DWARF Version
+	.long	.Lcu_begin1                     # Offset of Compilation Unit Info
+	.long	41                              # Compilation Unit Length
+	.long	0                               # End Mark
+.LpubNames_end1:
+	.section	.debug_gnu_pubtypes,"",@progbits
+	.long	.LpubTypes_end1-.LpubTypes_start1 # Length of Public Types Info
+.LpubTypes_start1:
+	.short	2                               # DWARF Version
+	.long	.Lcu_begin1                     # Offset of Compilation Unit Info
+	.long	41                              # Compilation Unit Length
+	.long	0                               # End Mark
+.LpubTypes_end1:
+	.ident	"clang version 16.0.6"
+	.ident	"clang version 16.0.6"
+	.section	.GCC.command.line,"MS",@progbits,1
+	.zero	1
+	.ascii	""
+	.zero	1
+	.ascii	""
+	.zero	1
+	.section	.debug_gnu_pubtypes,"",@progbits
+	.section	".note.GNU-stack","",@progbits
+	.addrsig
+	.section	.debug_line,"",@progbits
+.Lline_table_start0:

>From 793061125596e93a5c87ab88180ed7f0b47a55a9 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Tue, 23 Sep 2025 09:36:09 +0100
Subject: [PATCH 21/42] [AMDGPU] Skip debug instructions in
 SIShrinkInstructions::matchSwap (#160123)

---
 llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp | 7 ++++++-
 llvm/test/CodeGen/AMDGPU/v_swap_b32.mir         | 1 +
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 7a519117f2482..179ecbad5239f 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -712,10 +712,13 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
   bool KilledT = false;
   for (auto Iter = std::next(MovT.getIterator()),
             E = MovT.getParent()->instr_end();
-       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {
+       Iter != E && Count < SearchLimit && !KilledT; ++Iter) {
 
     MachineInstr *MovY = &*Iter;
     KilledT = MovY->killsRegister(T, TRI);
+    if (MovY->isDebugInstr())
+      continue;
+    ++Count;
 
     if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
          MovY->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
@@ -733,6 +736,8 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
     MachineInstr *MovX = nullptr;
     for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
          I != IY; ++I) {
+      if (I->isDebugInstr())
+        continue;
       if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
           instModifiesReg(&*I, T, Tsub) ||
           (MovX && instModifiesReg(&*I, X, Xsub))) {
diff --git a/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir b/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
index 95aaea6ea8091..27229cd518028 100644
--- a/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
+++ b/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
@@ -1,4 +1,5 @@
 # RUN: llc -simplify-mir -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-shrink-instructions -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
+# RUN: llc -simplify-mir -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-shrink-instructions -verify-machineinstrs %s -o - -debugify-and-strip-all-safe | FileCheck -check-prefix=GCN %s
 # RUN: llc -simplify-mir -mtriple=amdgcn -mcpu=gfx900 -passes=si-shrink-instructions -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: swap_phys_condensed

>From 1bec36ece78b0a971ee62fdb965d78c46f8f5f8d Mon Sep 17 00:00:00 2001
From: David Sherwood <david.sherwood at arm.com>
Date: Tue, 23 Sep 2025 09:37:47 +0100
Subject: [PATCH 22/42] [LLVM] Volunteer myself as a secondary maintainer for
 LoopVectorizer (#120704)

I now have the time and am willing to help out with code reviews, tidying
up tests, cleaning up code, etc. Over the last few years I've done quite a
lot of vectoriser work, including adding support for scalable vectors with
tail-folding and, this year, working towards vectorisation of loops with
uncountable early exits. I've also been actively engaged in reviewing
upstream loop vectoriser patches and submitting NFC clean-up patches.

Now that we can have a list of maintainers per area, I thought perhaps
it's worth formalising things by adding myself as a secondary maintainer,
if others are happy?

Not entirely sure who to add as reviewers on this PR though!
---
 llvm/Maintainers.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llvm/Maintainers.md b/llvm/Maintainers.md
index 0c976048a8d26..5afdd1519c96f 100644
--- a/llvm/Maintainers.md
+++ b/llvm/Maintainers.md
@@ -87,7 +87,9 @@ quentin.colombet at gmail.com (email), [qcolombet](https://github.com/qcolombet) (G
 #### LoopVectorize
 
 Florian Hahn \
-flo at fhahn.com (email), [fhahn](https://github.com/fhahn) (GitHub)
+flo at fhahn.com (email), [fhahn](https://github.com/fhahn) (GitHub) \
+David Sherwood \
+david.sherwood at arm.com (email), [david-arm](https://github.com/david-arm) (GitHub)
 
 #### MemorySSA
 

>From 41c8c5359ce9b1493306210302e04d3fbeb8ffb0 Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Tue, 23 Sep 2025 09:41:45 +0100
Subject: [PATCH 23/42] [AMDGPU] Add PAL metadata names for 32 user SGPRs
 (#160126)

Since #154205 some subtargets can use up to 32 user SGPRs. Add names for
them all so they can be pretty printed in PAL metadata.
---
 .../Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp | 16 ++++++++++
 llvm/test/MC/AMDGPU/pal-registers.s           | 32 +++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp
index a7a0e33da5e4a..8e601ad8a48fd 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp
@@ -681,6 +681,22 @@ static const char *getRegisterName(unsigned RegNum) {
       {0x2e4d, "COMPUTE_USER_DATA_13"},
       {0x2e4e, "COMPUTE_USER_DATA_14"},
       {0x2e4f, "COMPUTE_USER_DATA_15"},
+      {0x2e50, "COMPUTE_USER_DATA_16"},
+      {0x2e51, "COMPUTE_USER_DATA_17"},
+      {0x2e52, "COMPUTE_USER_DATA_18"},
+      {0x2e53, "COMPUTE_USER_DATA_19"},
+      {0x2e54, "COMPUTE_USER_DATA_20"},
+      {0x2e55, "COMPUTE_USER_DATA_21"},
+      {0x2e56, "COMPUTE_USER_DATA_22"},
+      {0x2e57, "COMPUTE_USER_DATA_23"},
+      {0x2e58, "COMPUTE_USER_DATA_24"},
+      {0x2e59, "COMPUTE_USER_DATA_25"},
+      {0x2e5a, "COMPUTE_USER_DATA_26"},
+      {0x2e5b, "COMPUTE_USER_DATA_27"},
+      {0x2e5c, "COMPUTE_USER_DATA_28"},
+      {0x2e5d, "COMPUTE_USER_DATA_29"},
+      {0x2e5e, "COMPUTE_USER_DATA_30"},
+      {0x2e5f, "COMPUTE_USER_DATA_31"},
 
       {0x2e07, "COMPUTE_NUM_THREAD_X"},
       {0x2e08, "COMPUTE_NUM_THREAD_Y"},
diff --git a/llvm/test/MC/AMDGPU/pal-registers.s b/llvm/test/MC/AMDGPU/pal-registers.s
index 3080518b7eb0a..839b3126a131b 100644
--- a/llvm/test/MC/AMDGPU/pal-registers.s
+++ b/llvm/test/MC/AMDGPU/pal-registers.s
@@ -258,6 +258,22 @@ amdpal.pipelines:
       0x2e4d: 0
       0x2e4e: 0
       0x2e4f: 0
+      0x2e50: 0
+      0x2e51: 0
+      0x2e52: 0
+      0x2e53: 0
+      0x2e54: 0
+      0x2e55: 0
+      0x2e56: 0
+      0x2e57: 0
+      0x2e58: 0
+      0x2e59: 0
+      0x2e5a: 0
+      0x2e5b: 0
+      0x2e5c: 0
+      0x2e5d: 0
+      0x2e5e: 0
+      0x2e5f: 0
       0xa08f: 0
       0xa191: 0
       0xa192: 0
@@ -596,6 +612,22 @@ amdpal.pipelines:
 // CHECK: 0x2e4d (COMPUTE_USER_DATA_13)
 // CHECK: 0x2e4e (COMPUTE_USER_DATA_14)
 // CHECK: 0x2e4f (COMPUTE_USER_DATA_15)
+// CHECK: 0x2e50 (COMPUTE_USER_DATA_16)
+// CHECK: 0x2e51 (COMPUTE_USER_DATA_17)
+// CHECK: 0x2e52 (COMPUTE_USER_DATA_18)
+// CHECK: 0x2e53 (COMPUTE_USER_DATA_19)
+// CHECK: 0x2e54 (COMPUTE_USER_DATA_20)
+// CHECK: 0x2e55 (COMPUTE_USER_DATA_21)
+// CHECK: 0x2e56 (COMPUTE_USER_DATA_22)
+// CHECK: 0x2e57 (COMPUTE_USER_DATA_23)
+// CHECK: 0x2e58 (COMPUTE_USER_DATA_24)
+// CHECK: 0x2e59 (COMPUTE_USER_DATA_25)
+// CHECK: 0x2e5a (COMPUTE_USER_DATA_26)
+// CHECK: 0x2e5b (COMPUTE_USER_DATA_27)
+// CHECK: 0x2e5c (COMPUTE_USER_DATA_28)
+// CHECK: 0x2e5d (COMPUTE_USER_DATA_29)
+// CHECK: 0x2e5e (COMPUTE_USER_DATA_30)
+// CHECK: 0x2e5f (COMPUTE_USER_DATA_31)
 // CHECK: 0xa08f (CB_SHADER_MASK)
 // CHECK: 0xa191 (SPI_PS_INPUT_CNTL_0)
 // CHECK: 0xa192 (SPI_PS_INPUT_CNTL_1)

>From 23a3347c4a1c55a4e1d2c82434330de35a9dfe69 Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Tue, 23 Sep 2025 09:44:27 +0100
Subject: [PATCH 24/42] [ASan][test-only] Allow Darwin test
 duplicate_os_log_reports to retry (#158662)

Currently this test is flaky. I believe that it may be because the logs
have not been flushed to disk before the `log show` command is run.

This patch allows the test to retry reading the log up to twice more,
with a 5-second sleep in between.
---
 .../asan/TestCases/Darwin/duplicate_os_log_reports.cpp   | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/compiler-rt/test/asan/TestCases/Darwin/duplicate_os_log_reports.cpp b/compiler-rt/test/asan/TestCases/Darwin/duplicate_os_log_reports.cpp
index 0091ebc09205c..5a0353bfb1b31 100644
--- a/compiler-rt/test/asan/TestCases/Darwin/duplicate_os_log_reports.cpp
+++ b/compiler-rt/test/asan/TestCases/Darwin/duplicate_os_log_reports.cpp
@@ -8,8 +8,13 @@
 // RUN: FileCheck %s --check-prefixes CHECK,CHECK-PROC -input-file=%t.process_output.txt
 
 // Check syslog output. We filter recent system logs based on PID to avoid
-// getting the logs of previous test runs.
-// RUN: log show --debug --last 5m  --predicate "processID == ${TEST_PID}" --style syslog > %t.process_syslog_output.txt
+// getting the logs of previous test runs. Retry a couple of times in
+// case there is a delay.
+// RUN: for I in {1..3}; do \
+// RUN:   log show --debug --last $((SECONDS + 30))s --predicate "processID == ${TEST_PID}" --style syslog > %t.process_syslog_output.txt; \
+// RUN:   if grep -q "use-after-poison" %t.process_syslog_output.txt; then break; fi; \
+// RUN:   sleep 5; \
+// RUN: done
 // RUN: FileCheck %s -input-file=%t.process_syslog_output.txt
 #include <cassert>
 #include <cstdio>

>From 08b4c5f13051907e67dfe56810b52e270259f783 Mon Sep 17 00:00:00 2001
From: Elizaveta Noskova <159026035+enoskova-sc at users.noreply.github.com>
Date: Tue, 23 Sep 2025 11:54:52 +0300
Subject: [PATCH 25/42] [MIR] Support save/restore points with independent sets
 of registers (#119358)

This patch adds MIR parsing and serialization support for save and
restore points with subsets of callee-saved registers. That is, it
syntactically allows a function to contain two or more distinct
sub-regions in which distinct subsets of registers are spilled/filled as
callee saves. This is useful if, for example, one of the CSRs isn't
modified in one of the sub-regions but is in the other(s).

Support for actually using this capability in code generation is still
forthcoming. This patch is the next logical step towards multiple
save/restore point support.

All points are now stored in a DenseMap from MBB to a vector of
CalleeSavedInfo.
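
As a sketch, the new syntax looks roughly like this in MIR (block and
register names are illustrative):

  savePoint:
    - point:           '%bb.1'
      registers:
        - '$rbx'
        - '$r12'
  restorePoint:
    - point:           '%bb.2'
      registers:
        - '$rbx'
        - '$r12'

If no registers are recorded for a point, the 'registers' list can be
omitted.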

Shrink-Wrap points split Part 4.
RFC:
https://discourse.llvm.org/t/shrink-wrap-save-restore-points-splitting/83581

Part 1: https://github.com/llvm/llvm-project/pull/117862 (landed)
Part 2: https://github.com/llvm/llvm-project/pull/119355 (landed)
Part 3: https://github.com/llvm/llvm-project/pull/119357 (landed)
Part 5: https://github.com/llvm/llvm-project/pull/119359 (likely to be
further split)
---
 llvm/include/llvm/CodeGen/MIRYamlMapping.h    |  23 ++-
 llvm/include/llvm/CodeGen/MachineFrameInfo.h  |  27 +--
 llvm/lib/CodeGen/MIRParser/MIRParser.cpp      |  20 ++-
 llvm/lib/CodeGen/MIRPrinter.cpp               |  39 ++++-
 llvm/lib/CodeGen/MachineFrameInfo.cpp         |   4 +-
 llvm/lib/CodeGen/PrologEpilogInserter.cpp     |  34 +++-
 llvm/lib/CodeGen/ShrinkWrap.cpp               |  12 +-
 llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp  |   6 +-
 llvm/lib/Target/PowerPC/PPCFrameLowering.cpp  |   3 +-
 ...nfo-multiple-save-restore-points-parse.mir |   4 +
 ...fo-save-restore-points-with-regs-parse.mir | 154 ++++++++++++++++++
 llvm/tools/llvm-reduce/ReducerWorkItem.cpp    |   8 +-
 12 files changed, 283 insertions(+), 51 deletions(-)
 create mode 100644 llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir

diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index a91c26ee1122a..c7304e386b542 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -634,19 +634,36 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::CalledGlobal)
 namespace llvm {
 namespace yaml {
 
-// Struct representing one save/restore point in the 'savePoint'/'restorePoint'
-// list
+// Struct representing one save/restore point in the 'savePoint' /
+// 'restorePoint' list. One point consists of machine basic block name and list
+// of registers saved/restored in this basic block. In MIR it looks like:
+//  savePoint:
+//    - point:           '%bb.1'
+//      registers:
+//        - '$rbx'
+//        - '$r12'
+//        ...
+//  restorePoint:
+//    - point:           '%bb.1'
+//      registers:
+//        - '$rbx'
+//        - '$r12'
+// If no register is saved/restored in the selected BB,
+// field 'registers' is not specified.
 struct SaveRestorePointEntry {
   StringValue Point;
+  std::vector<StringValue> Registers;
 
   bool operator==(const SaveRestorePointEntry &Other) const {
-    return Point == Other.Point;
+    return Point == Other.Point && Registers == Other.Registers;
   }
 };
 
 template <> struct MappingTraits<SaveRestorePointEntry> {
   static void mapping(IO &YamlIO, SaveRestorePointEntry &Entry) {
     YamlIO.mapRequired("point", Entry.Point);
+    YamlIO.mapOptional("registers", Entry.Registers,
+                       std::vector<StringValue>());
   }
 };
 
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index e666001035deb..00c734330a40b 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -76,6 +76,9 @@ class CalleeSavedInfo {
   bool isSpilledToReg()                    const { return SpilledToReg; }
 };
 
+using SaveRestorePoints =
+    DenseMap<MachineBasicBlock *, std::vector<CalleeSavedInfo>>;
+
 /// The MachineFrameInfo class represents an abstract stack frame until
 /// prolog/epilog code is inserted.  This class is key to allowing stack frame
 /// representation optimizations, such as frame pointer elimination.  It also
@@ -333,9 +336,9 @@ class MachineFrameInfo {
   bool HasTailCall = false;
 
   /// Not empty, if shrink-wrapping found a better place for the prologue.
-  SmallVector<MachineBasicBlock *, 4> SavePoints;
+  SaveRestorePoints SavePoints;
   /// Not empty, if shrink-wrapping found a better place for the epilogue.
-  SmallVector<MachineBasicBlock *, 4> RestorePoints;
+  SaveRestorePoints RestorePoints;
 
   /// Size of the UnsafeStack Frame
   uint64_t UnsafeStackSize = 0;
@@ -825,17 +828,21 @@ class MachineFrameInfo {
 
   void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
 
-  ArrayRef<MachineBasicBlock *> getSavePoints() const { return SavePoints; }
-  void setSavePoints(ArrayRef<MachineBasicBlock *> NewSavePoints) {
-    SavePoints.assign(NewSavePoints.begin(), NewSavePoints.end());
-  }
-  ArrayRef<MachineBasicBlock *> getRestorePoints() const {
-    return RestorePoints;
+  const SaveRestorePoints &getRestorePoints() const { return RestorePoints; }
+
+  const SaveRestorePoints &getSavePoints() const { return SavePoints; }
+
+  void setSavePoints(SaveRestorePoints NewSavePoints) {
+    SavePoints = std::move(NewSavePoints);
   }
-  void setRestorePoints(ArrayRef<MachineBasicBlock *> NewRestorePoints) {
-    RestorePoints.assign(NewRestorePoints.begin(), NewRestorePoints.end());
+
+  void setRestorePoints(SaveRestorePoints NewRestorePoints) {
+    RestorePoints = std::move(NewRestorePoints);
   }
 
+  void clearSavePoints() { SavePoints.clear(); }
+  void clearRestorePoints() { RestorePoints.clear(); }
+
   uint64_t getUnsafeStackSize() const { return UnsafeStackSize; }
   void setUnsafeStackSize(uint64_t Size) { UnsafeStackSize = Size; }
 
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index bb70e7805e818..0f792b0ef206c 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -127,7 +127,7 @@ class MIRParserImpl {
   bool initializeSaveRestorePoints(
       PerFunctionMIParsingState &PFS,
       const std::vector<yaml::SaveRestorePointEntry> &YamlSRPoints,
-      SmallVectorImpl<MachineBasicBlock *> &SaveRestorePoints);
+      llvm::SaveRestorePoints &SaveRestorePoints);
 
   bool initializeCallSiteInfo(PerFunctionMIParsingState &PFS,
                               const yaml::MachineFunction &YamlMF);
@@ -872,11 +872,11 @@ bool MIRParserImpl::initializeFrameInfo(PerFunctionMIParsingState &PFS,
   MFI.setHasTailCall(YamlMFI.HasTailCall);
   MFI.setCalleeSavedInfoValid(YamlMFI.IsCalleeSavedInfoValid);
   MFI.setLocalFrameSize(YamlMFI.LocalFrameSize);
-  SmallVector<MachineBasicBlock *, 4> SavePoints;
+  llvm::SaveRestorePoints SavePoints;
   if (initializeSaveRestorePoints(PFS, YamlMFI.SavePoints, SavePoints))
     return true;
   MFI.setSavePoints(SavePoints);
-  SmallVector<MachineBasicBlock *, 4> RestorePoints;
+  llvm::SaveRestorePoints RestorePoints;
   if (initializeSaveRestorePoints(PFS, YamlMFI.RestorePoints, RestorePoints))
     return true;
   MFI.setRestorePoints(RestorePoints);
@@ -1098,14 +1098,22 @@ bool MIRParserImpl::initializeConstantPool(PerFunctionMIParsingState &PFS,
 bool MIRParserImpl::initializeSaveRestorePoints(
     PerFunctionMIParsingState &PFS,
     const std::vector<yaml::SaveRestorePointEntry> &YamlSRPoints,
-    SmallVectorImpl<MachineBasicBlock *> &SaveRestorePoints) {
+    llvm::SaveRestorePoints &SaveRestorePoints) {
+  SMDiagnostic Error;
   MachineBasicBlock *MBB = nullptr;
   for (const yaml::SaveRestorePointEntry &Entry : YamlSRPoints) {
     if (parseMBBReference(PFS, MBB, Entry.Point.Value))
       return true;
-    SaveRestorePoints.push_back(MBB);
-  }
 
+    std::vector<CalleeSavedInfo> Registers;
+    for (auto &RegStr : Entry.Registers) {
+      Register Reg;
+      if (parseNamedRegisterReference(PFS, Reg, RegStr.Value, Error))
+        return error(Error, RegStr.SourceRange);
+      Registers.push_back(CalleeSavedInfo(Reg));
+    }
+    SaveRestorePoints.try_emplace(MBB, std::move(Registers));
+  }
   return false;
 }
 
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 91a21a4adf4eb..bf8a6cdf097a9 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -149,11 +149,13 @@ static void convertMCP(yaml::MachineFunction &MF,
 static void convertMJTI(ModuleSlotTracker &MST, yaml::MachineJumpTable &YamlJTI,
                         const MachineJumpTableInfo &JTI);
 static void convertMFI(ModuleSlotTracker &MST, yaml::MachineFrameInfo &YamlMFI,
-                       const MachineFrameInfo &MFI);
+                       const MachineFrameInfo &MFI,
+                       const TargetRegisterInfo *TRI);
 static void
 convertSRPoints(ModuleSlotTracker &MST,
                 std::vector<yaml::SaveRestorePointEntry> &YamlSRPoints,
-                ArrayRef<MachineBasicBlock *> SaveRestorePoints);
+                const llvm::SaveRestorePoints &SRPoints,
+                const TargetRegisterInfo *TRI);
 static void convertStackObjects(yaml::MachineFunction &YMF,
                                 const MachineFunction &MF,
                                 ModuleSlotTracker &MST, MFPrintState &State);
@@ -204,7 +206,8 @@ static void printMF(raw_ostream &OS, const MachineModuleInfo &MMI,
   convertMRI(YamlMF, MF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
   MachineModuleSlotTracker &MST = State.MST;
   MST.incorporateFunction(MF.getFunction());
-  convertMFI(MST, YamlMF.FrameInfo, MF.getFrameInfo());
+  convertMFI(MST, YamlMF.FrameInfo, MF.getFrameInfo(),
+             MF.getSubtarget().getRegisterInfo());
   convertStackObjects(YamlMF, MF, MST, State);
   convertEntryValueObjects(YamlMF, MF, MST);
   convertCallSiteObjects(YamlMF, MF, MST);
@@ -339,7 +342,8 @@ static void convertMRI(yaml::MachineFunction &YamlMF, const MachineFunction &MF,
 }
 
 static void convertMFI(ModuleSlotTracker &MST, yaml::MachineFrameInfo &YamlMFI,
-                       const MachineFrameInfo &MFI) {
+                       const MachineFrameInfo &MFI,
+                       const TargetRegisterInfo *TRI) {
   YamlMFI.IsFrameAddressTaken = MFI.isFrameAddressTaken();
   YamlMFI.IsReturnAddressTaken = MFI.isReturnAddressTaken();
   YamlMFI.HasStackMap = MFI.hasStackMap();
@@ -360,9 +364,9 @@ static void convertMFI(ModuleSlotTracker &MST, yaml::MachineFrameInfo &YamlMFI,
   YamlMFI.IsCalleeSavedInfoValid = MFI.isCalleeSavedInfoValid();
   YamlMFI.LocalFrameSize = MFI.getLocalFrameSize();
   if (!MFI.getSavePoints().empty())
-    convertSRPoints(MST, YamlMFI.SavePoints, MFI.getSavePoints());
+    convertSRPoints(MST, YamlMFI.SavePoints, MFI.getSavePoints(), TRI);
   if (!MFI.getRestorePoints().empty())
-    convertSRPoints(MST, YamlMFI.RestorePoints, MFI.getRestorePoints());
+    convertSRPoints(MST, YamlMFI.RestorePoints, MFI.getRestorePoints(), TRI);
 }
 
 static void convertEntryValueObjects(yaml::MachineFunction &YMF,
@@ -619,16 +623,35 @@ static void convertMCP(yaml::MachineFunction &MF,
 static void
 convertSRPoints(ModuleSlotTracker &MST,
                 std::vector<yaml::SaveRestorePointEntry> &YamlSRPoints,
-                ArrayRef<MachineBasicBlock *> SRPoints) {
-  for (const auto &MBB : SRPoints) {
+                const llvm::SaveRestorePoints &SRPoints,
+                const TargetRegisterInfo *TRI) {
+  for (const auto &[MBB, CSInfos] : SRPoints) {
     SmallString<16> Str;
     yaml::SaveRestorePointEntry Entry;
     raw_svector_ostream StrOS(Str);
     StrOS << printMBBReference(*MBB);
     Entry.Point = StrOS.str().str();
     Str.clear();
+    for (const CalleeSavedInfo &Info : CSInfos) {
+      if (Info.getReg()) {
+        StrOS << printReg(Info.getReg(), TRI);
+        Entry.Registers.push_back(StrOS.str().str());
+        Str.clear();
+      }
+    }
+    // Sorting is needed here to get stable output for the lit tests.
+    std::sort(Entry.Registers.begin(), Entry.Registers.end(),
+              [](const yaml::StringValue &Lhs, const yaml::StringValue &Rhs) {
+                return Lhs.Value < Rhs.Value;
+              });
     YamlSRPoints.push_back(std::move(Entry));
   }
+  // Sorting is needed here to get stable output for the lit tests.
+  std::sort(YamlSRPoints.begin(), YamlSRPoints.end(),
+            [](const yaml::SaveRestorePointEntry &Lhs,
+               const yaml::SaveRestorePointEntry &Rhs) {
+              return Lhs.Point.Value < Rhs.Point.Value;
+            });
 }
 
 static void convertMJTI(ModuleSlotTracker &MST, yaml::MachineJumpTable &YamlJTI,
diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
index a8306b2ef2e5b..aed68afb4eb1b 100644
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -250,14 +250,14 @@ void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
     OS << "save points:\n";
 
     for (auto &item : SavePoints)
-      OS << printMBBReference(*item) << "\n";
+      OS << printMBBReference(*item.first) << "\n";
   } else
     OS << "save points are empty\n";
 
   if (!RestorePoints.empty()) {
     OS << "restore points:\n";
     for (auto &item : RestorePoints)
-      OS << printMBBReference(*item) << "\n";
+      OS << printMBBReference(*item.first) << "\n";
   } else
     OS << "restore points are empty\n";
 }
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 8fc0748ecc0e3..0be75e073dedd 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -351,8 +351,8 @@ bool PEIImpl::run(MachineFunction &MF) {
   delete RS;
   SaveBlocks.clear();
   RestoreBlocks.clear();
-  MFI.setSavePoints({});
-  MFI.setRestorePoints({});
+  MFI.clearSavePoints();
+  MFI.clearRestorePoints();
   return true;
 }
 
@@ -431,10 +431,12 @@ void PEIImpl::calculateSaveRestoreBlocks(MachineFunction &MF) {
   if (!MFI.getSavePoints().empty()) {
     assert(MFI.getSavePoints().size() == 1 &&
            "Multiple save points are not yet supported!");
-    SaveBlocks.push_back(MFI.getSavePoints().front());
+    const auto &SavePoint = *MFI.getSavePoints().begin();
+    SaveBlocks.push_back(SavePoint.first);
     assert(MFI.getRestorePoints().size() == 1 &&
            "Multiple restore points are not yet supported!");
-    MachineBasicBlock *RestoreBlock = MFI.getRestorePoints().front();
+    const auto &RestorePoint = *MFI.getRestorePoints().begin();
+    MachineBasicBlock *RestoreBlock = RestorePoint.first;
     // If RestoreBlock does not have any successor and is not a return block
     // then the end point is unreachable and we do not need to insert any
     // epilogue.
@@ -563,8 +565,9 @@ static void updateLiveness(MachineFunction &MF) {
 
   assert(MFI.getSavePoints().size() < 2 &&
          "Multiple save points not yet supported!");
-  MachineBasicBlock *Save =
-      MFI.getSavePoints().empty() ? nullptr : MFI.getSavePoints().front();
+  MachineBasicBlock *Save = MFI.getSavePoints().empty()
+                                ? nullptr
+                                : (*MFI.getSavePoints().begin()).first;
 
   if (!Save)
     Save = Entry;
@@ -577,8 +580,9 @@ static void updateLiveness(MachineFunction &MF) {
 
   assert(MFI.getRestorePoints().size() < 2 &&
          "Multiple restore points not yet supported!");
-  MachineBasicBlock *Restore =
-      MFI.getRestorePoints().empty() ? nullptr : MFI.getRestorePoints().front();
+  MachineBasicBlock *Restore = MFI.getRestorePoints().empty()
+                                   ? nullptr
+                                   : (*MFI.getRestorePoints().begin()).first;
   if (Restore)
     // By construction Restore cannot be visited, otherwise it
     // means there exists a path to Restore that does not go
@@ -687,6 +691,20 @@ void PEIImpl::spillCalleeSavedRegs(MachineFunction &MF) {
     MFI.setCalleeSavedInfoValid(true);
 
     std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+
+    // Fill SavePoints and RestorePoints with CalleeSavedRegisters
+    if (!MFI.getSavePoints().empty()) {
+      SaveRestorePoints SaveRestorePts;
+      for (const auto &SavePoint : MFI.getSavePoints())
+        SaveRestorePts.insert({SavePoint.first, CSI});
+      MFI.setSavePoints(std::move(SaveRestorePts));
+
+      SaveRestorePts.clear();
+      for (const auto &RestorePoint : MFI.getRestorePoints())
+        SaveRestorePts.insert({RestorePoint.first, CSI});
+      MFI.setRestorePoints(std::move(SaveRestorePts));
+    }
+
     if (!CSI.empty()) {
       if (!MFI.hasCalls())
         NumLeafFuncWithSpills++;
diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp
index 938f2d756bc87..826e4126de44c 100644
--- a/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -967,12 +967,12 @@ bool ShrinkWrapImpl::run(MachineFunction &MF) {
                     << "\nRestore: " << printMBBReference(*Restore) << '\n');
 
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  SmallVector<MachineBasicBlock *, 4> SavePoints;
-  SmallVector<MachineBasicBlock *, 4> RestorePoints;
-  if (Save) {
-    SavePoints.push_back(Save);
-    RestorePoints.push_back(Restore);
-  }
+
+  // The list of CalleeSavedInfo for the registers will be added later,
+  // during the PrologEpilogInserter pass.
+  SaveRestorePoints SavePoints({{Save, {}}});
+  SaveRestorePoints RestorePoints({{Restore, {}}});
+
   MFI.setSavePoints(SavePoints);
   MFI.setRestorePoints(RestorePoints);
   ++NumCandidates;
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 1a91bbd433553..40eeeb8a8630d 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -213,10 +213,12 @@ void SILowerSGPRSpills::calculateSaveRestoreBlocks(MachineFunction &MF) {
   if (!MFI.getSavePoints().empty()) {
     assert(MFI.getSavePoints().size() == 1 &&
            "Multiple save points not yet supported!");
-    SaveBlocks.push_back(MFI.getSavePoints().front());
+    const auto &SavePoint = *MFI.getSavePoints().begin();
+    SaveBlocks.push_back(SavePoint.first);
     assert(MFI.getRestorePoints().size() == 1 &&
            "Multiple restore points not yet supported!");
-    MachineBasicBlock *RestoreBlock = MFI.getRestorePoints().front();
+    const auto &RestorePoint = *MFI.getRestorePoints().begin();
+    MachineBasicBlock *RestoreBlock = RestorePoint.first;
     // If RestoreBlock does not have any successor and is not a return block
     // then the end point is unreachable and we do not need to insert any
     // epilogue.
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 2ad3ed21732ed..910bc9d281259 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2081,9 +2081,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
   if (!MFI.getSavePoints().empty() && MFI.hasTailCall()) {
     assert(MFI.getRestorePoints().size() < 2 &&
            "MFI can't contain multiple restore points!");
-    MachineBasicBlock *RestoreBlock = MFI.getRestorePoints().front();
     for (MachineBasicBlock &MBB : MF) {
-      if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
+      if (MBB.isReturnBlock() && (!MFI.getRestorePoints().contains(&MBB)))
         createTailCallBranchInstr(MBB);
     }
   }
diff --git a/llvm/test/CodeGen/MIR/X86/frame-info-multiple-save-restore-points-parse.mir b/llvm/test/CodeGen/MIR/X86/frame-info-multiple-save-restore-points-parse.mir
index 4c60ccd573595..2cdd6a0bce471 100644
--- a/llvm/test/CodeGen/MIR/X86/frame-info-multiple-save-restore-points-parse.mir
+++ b/llvm/test/CodeGen/MIR/X86/frame-info-multiple-save-restore-points-parse.mir
@@ -32,10 +32,14 @@ liveins:
 # CHECK: frameInfo:
 # CHECK:      savePoint:
 # CHECK-NEXT:   - point:           '%bb.1'
+# CHECK-NEXT:     registers: []
 # CHECK-NEXT:   - point:           '%bb.2'
+# CHECK-NEXT:     registers: []
 # CHECK:      restorePoint:
 # CHECK-NEXT:   - point:           '%bb.2'
+# CHECK-NEXT:     registers: []
 # CHECK-NEXT:   - point:           '%bb.3'
+# CHECK-NEXT:     registers: []
 # CHECK: stack
 frameInfo:
   maxAlignment:  4
diff --git a/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir
new file mode 100644
index 0000000000000..d23f2f7bd585e
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir
@@ -0,0 +1,154 @@
+# RUN: llc -run-pass none -o - %s | FileCheck %s
+
+--- |
+  define ptr @foo(ptr %ptr, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6) {
+  entry:
+    %tobool.not = icmp eq ptr %ptr, null
+    br i1 %tobool.not, label %if.then, label %if.end
+  
+  if.then:                                          ; preds = %entry
+    %call = tail call ptr @bar(ptr %ptr, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6)
+    br label %if.end
+  
+  if.end:                                           ; preds = %if.then, %entry
+    %ptr.addr.0 = phi ptr [ %call, %if.then ], [ %ptr, %entry ]
+    %incdec.ptr = getelementptr inbounds i8, ptr %ptr.addr.0, i64 1
+    %call2 = tail call ptr @qux(ptr %incdec.ptr, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6)
+    ret ptr %call2
+  }
+  
+  declare ptr @bar(ptr, i64, i64, i64, i64, i64)
+  
+  declare ptr @qux(ptr, i64, i64, i64, i64, i64)
+...
+---
+name:            foo
+alignment:       16
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+noPhis:          true
+isSSA:           false
+noVRegs:         true
+hasFakeUses:     false
+callsEHReturn:   false
+callsUnwindInit: false
+hasEHContTarget: false
+hasEHScopes:     false
+hasEHFunclets:   false
+isOutlined:      false
+debugInstrRef:   true
+failsVerification: false
+tracksDebugUserValues: true
+registers:       []
+liveins:
+  - { reg: '$rdi', virtual-reg: '' }
+  - { reg: '$rsi', virtual-reg: '' }
+  - { reg: '$rdx', virtual-reg: '' }
+  - { reg: '$rcx', virtual-reg: '' }
+  - { reg: '$r8', virtual-reg: '' }
+  - { reg: '$r9', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    1
+  adjustsStack:    true
+  hasCalls:        true
+  stackProtector:  ''
+  functionContext: ''
+  maxCallFrameSize: 4294967295
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  hasTailCall:     true
+  isCalleeSavedInfoValid: false
+  localFrameSize:  0
+# CHECK:       savePoint:
+# CHECK-NEXT:   - point:           '%bb.2'
+# CHECK-NEXT:     registers:
+# CHECK-NEXT:       - '$r12'
+# CHECK-NEXT:       - '$r13'
+# CHECK-NEXT:       - '$r14'
+# CHECK-NEXT:       - '$r15'
+# CHECK-NEXT:       - '$rbx'
+# CHECK:      restorePoint:
+# CHECK-NEXT:   - point:           '%bb.2'
+# CHECK-NEXT:     registers:
+# CHECK-NEXT:       - '$r12'
+# CHECK-NEXT:       - '$r13'
+# CHECK-NEXT:       - '$r14'
+# CHECK-NEXT:       - '$r15'
+# CHECK-NEXT:       - '$rbx'
+  savePoint:
+    - point:           '%bb.1'
+      registers:
+        - '$rbx'
+        - '$r12'
+        - '$r13'
+        - '$r14'
+        - '$r15'
+  restorePoint:
+    - point:           '%bb.1'
+      registers:
+        - '$rbx'
+        - '$r12'
+        - '$r13'
+        - '$r14'
+        - '$r15'
+fixedStack:      []
+stack:           []
+entry_values:    []
+callSites:       []
+debugValueSubstitutions: []
+constants:       []
+machineFunctionInfo:
+  amxProgModel:    None
+body:             |
+  bb.0.entry:
+    successors: %bb.1(0x30000000), %bb.3(0x50000000)
+    liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+  
+    TEST64rr renamable $rdi, renamable $rdi, implicit-def $eflags
+    JCC_1 %bb.1, 4, implicit killed $eflags
+  
+  bb.3:
+    successors: %bb.2(0x80000000)
+    liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+  
+    JMP_1 %bb.2
+  
+  bb.1.if.then:
+    successors: %bb.2(0x80000000)
+    liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+  
+    ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+    renamable $rbx = COPY renamable $rsi
+    renamable $r14 = COPY renamable $rdx
+    renamable $r15 = COPY renamable $rcx
+    renamable $r12 = COPY renamable $r8
+    renamable $r13 = COPY renamable $r9
+    CALL64pcrel32 target-flags(x86-plt) @bar, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $rcx, implicit $r8, implicit $r9, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+    renamable $rsi = COPY killed renamable $rbx
+    renamable $rdx = COPY killed renamable $r14
+    renamable $rcx = COPY killed renamable $r15
+    renamable $r8 = COPY killed renamable $r12
+    renamable $r9 = COPY killed renamable $r13
+    ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
+    renamable $rdi = COPY killed $rax
+  
+  bb.2.if.end:
+    liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+  
+    renamable $rdi = nuw INC64r killed renamable $rdi, implicit-def dead $eflags
+    TCRETURNdi64 target-flags(x86-plt) @qux, 0, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $rcx, implicit $r8, implicit $r9
+...
+
diff --git a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
index b1138ef9d5289..c479233a712e7 100644
--- a/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
+++ b/llvm/tools/llvm-reduce/ReducerWorkItem.cpp
@@ -62,12 +62,12 @@ static cl::opt<bool> TmpFilesAsBitcode(
     cl::desc("Always write temporary files as bitcode instead of textual IR"),
     cl::init(false), cl::cat(LLVMReduceOptions));
 
-static SmallVector<MachineBasicBlock *> constructSaveRestorePoints(
-    ArrayRef<MachineBasicBlock *> SRPoints,
+static SaveRestorePoints constructSaveRestorePoints(
+    const SaveRestorePoints &SRPoints,
     const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &BBMap) {
-  SmallVector<MachineBasicBlock *> Pts;
+  SaveRestorePoints Pts{};
   for (auto &Src : SRPoints)
-    Pts.push_back(BBMap.find(Src)->second);
+    Pts.insert({BBMap.find(Src.first)->second, Src.second});
   return Pts;
 }
 

>From 721c7af446c37c7cb0a72a42c043a704cf1f09c1 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Tue, 23 Sep 2025 11:22:27 +0200
Subject: [PATCH 26/42] [clang][bytecode] Only block pointers can be partially
 initialized (#160075)

So ignore the rest in `checkFullyInitialized()`.

Fixes #160071
---
 clang/lib/AST/ByteCode/EvaluationResult.cpp |  2 ++
 clang/test/AST/ByteCode/typeid.cpp          | 15 +++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/clang/lib/AST/ByteCode/EvaluationResult.cpp b/clang/lib/AST/ByteCode/EvaluationResult.cpp
index ba818788d7026..7c3c21cf28251 100644
--- a/clang/lib/AST/ByteCode/EvaluationResult.cpp
+++ b/clang/lib/AST/ByteCode/EvaluationResult.cpp
@@ -133,6 +133,8 @@ bool EvaluationResult::checkFullyInitialized(InterpState &S,
 
   if (Ptr.isZero())
     return true;
+  if (!Ptr.isBlockPointer())
+    return true;
 
   // We can't inspect dead pointers at all. Return true here so we can
   // diagnose them later.
diff --git a/clang/test/AST/ByteCode/typeid.cpp b/clang/test/AST/ByteCode/typeid.cpp
index 179a66fd7fd0a..00b01c8e40682 100644
--- a/clang/test/AST/ByteCode/typeid.cpp
+++ b/clang/test/AST/ByteCode/typeid.cpp
@@ -32,10 +32,10 @@ static_assert(&typeid(int) < &typeid(long)); // both-error {{not an integral con
 static_assert(&typeid(int) > &typeid(long)); // both-error {{not an integral constant expression}} \
                                              // both-note {{comparison between pointers to unrelated objects '&typeid(int)' and '&typeid(long)' has unspecified value}}
 
- struct Base {
-   virtual void func() ;
- };
- struct Derived : Base {};
+struct Base {
+ virtual void func() ;
+};
+struct Derived : Base {};
 
 constexpr bool test() {
   Derived derived;
@@ -52,3 +52,10 @@ int dontcrash() {
   );
   return pti.__flags == 0 ? 1 : 0;
 }
+
+namespace TypeidPtrInEvaluationResult {
+  struct C {};
+  C c = C();
+  consteval const std::type_info *ftype_info() { return &typeid(c); }
+  const std::type_info *T1 = ftype_info();
+}

>From 5381b6b85702bfc70dad4cd4fd549c6d42734ca2 Mon Sep 17 00:00:00 2001
From: Ryosuke Niwa <rniwa at webkit.org>
Date: Tue, 23 Sep 2025 02:23:27 -0700
Subject: [PATCH 27/42] [alpha.webkit.UnretainedCallArgsChecker] Treat boolean
 literal as safe (#159705)

---
 .../Checkers/WebKit/RawPtrRefCallArgsChecker.cpp             | 2 ++
 clang/test/Analysis/Checkers/WebKit/objc-mock-types.h        | 1 +
 clang/test/Analysis/Checkers/WebKit/unretained-call-args.mm  | 5 ++++-
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefCallArgsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefCallArgsChecker.cpp
index df13de158a646..9585ceb40f95e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefCallArgsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefCallArgsChecker.cpp
@@ -224,6 +224,8 @@ class RawPtrRefCallArgsChecker
             // foo(123)
             return true;
           }
+          if (isa<CXXBoolLiteralExpr>(ArgOrigin))
+            return true;
           if (isa<ObjCStringLiteral>(ArgOrigin))
             return true;
           if (isASafeCallArg(ArgOrigin))
diff --git a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
index 854742b82a2d4..39dee1746158b 100644
--- a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
+++ b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h
@@ -160,6 +160,7 @@ __attribute__((objc_root_class))
 - (int)intValue;
 - (id)initWithInt:(int)value;
 + (NSNumber *)numberWithInt:(int)value;
++ (NSNumber *)numberWithBool:(BOOL)value;
 @end
 
 @interface SomeObj : NSObject
diff --git a/clang/test/Analysis/Checkers/WebKit/unretained-call-args.mm b/clang/test/Analysis/Checkers/WebKit/unretained-call-args.mm
index ddaa34d8ace45..c9d2fe861bb49 100644
--- a/clang/test/Analysis/Checkers/WebKit/unretained-call-args.mm
+++ b/clang/test/Analysis/Checkers/WebKit/unretained-call-args.mm
@@ -390,17 +390,20 @@ void foo() {
   }
 }
 
-#define YES 1
+#define YES __objc_yes
+#define NO 0
 
 namespace call_with_cf_constant {
   void bar(const NSArray *);
   void baz(const NSDictionary *);
   void boo(NSNumber *);
+  void boo(CFTypeRef);
   void foo() {
     CFArrayCreateMutable(kCFAllocatorDefault, 10);
     bar(@[@"hello"]);
     baz(@{@"hello": @3});
     boo(@YES);
+    boo(@NO);
   }
 }
 

>From fecd43cad3e6374b325fe02e694dbdcbdca8efd7 Mon Sep 17 00:00:00 2001
From: nerix <nerixdev at outlook.de>
Date: Tue, 23 Sep 2025 11:34:48 +0200
Subject: [PATCH 28/42] [LLDB][PDB] Run UDT layout test with native PDB too
 (#159769)

This test was failing with the native plugin due to two reasons:

1. The static `C::abc` was printed as `(int) ::C::abc = 123`
2. The order of the base classes of [`C`
(`List::Value`)](https://github.com/llvm/llvm-project/blob/b7e4edca3d56ec87f719c202f5397b245595f7cc/lldb/test/Shell/SymbolFile/PDB/Inputs/UdtLayoutTest.cpp#L30)
is different between DIA and the native plugin. I don't know how the
order in the DIA plugin is determined - it prints `B<0>`, `B<1>`,
`B<2>`, `B<3>`, `A`. The native plugin follows the order of the bases in
memory and prints `B<2>`, `B<3>`, `A`, `B<0>`, `B<1>` (last three are
the virtual bases).
    <details><summary>Class layout of C</summary>

    ```
    class C size(88):
            +---
    0       | +--- (base class B<2>)
    0       | | {vbptr}
    8       | | _a
    9.      | | _b (bitstart=3,nbits=6)
    11      | | _c
            | +---
    15      | +--- (base class B<3>)
    15      | | {vbptr}
    23      | | _a
    24.     | | _b (bitstart=3,nbits=6)
    26      | | _c
            | +---
            | <alignment member> (size=2)
    32      | _x
    36      | _y
    38      | _z
            | <alignment member> (size=1)
            | <alignment member> (size=2)
            +---
            +--- (virtual base A)
    40      | {vfptr}
    48      | U _u
            | <alignment member> (size=4)
            +---
            +--- (virtual base B<0>)
    56      | {vbptr}
    64      | _a
    65.     | _b (bitstart=3,nbits=6)
    67      | _c
            +---
            +--- (virtual base B<1>)
    71      | {vbptr}
    79      | _a
    80.     | _b (bitstart=3,nbits=6)
    82      | _c
            +---
    ```
    </details>

I split the tests for the plugins for better readability.
---
 .../SymbolFile/NativePDB/udt-layout.test      | 129 ++++++++++++++++++
 .../test/Shell/SymbolFile/PDB/udt-layout.test |   2 +-
 2 files changed, 130 insertions(+), 1 deletion(-)
 create mode 100644 lldb/test/Shell/SymbolFile/NativePDB/udt-layout.test

diff --git a/lldb/test/Shell/SymbolFile/NativePDB/udt-layout.test b/lldb/test/Shell/SymbolFile/NativePDB/udt-layout.test
new file mode 100644
index 0000000000000..6e971541de60c
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/NativePDB/udt-layout.test
@@ -0,0 +1,129 @@
+# REQUIRES: target-windows
+
+# Test UDT layout reconstruction
+# RUN: split-file %s %t
+# RUN: %build --compiler=clang-cl -o %t.exe -- %t/main.cpp
+# RUN: %lldb -f %t.exe -s %t/commands.input 2>&1 | FileCheck %s
+
+#--- main.cpp
+
+// This is taken from the DIA plugin test (UdtLayoutTest.cpp).
+struct A {
+  explicit A(int u) { _u._u3 = u; }
+  A(const A &) = default;
+  virtual ~A() = default;
+
+private:
+  union U {
+    char _u1;
+    short _u2;
+    int _u3;
+  };
+
+  A::U _u;
+};
+
+#pragma pack(push, 1)
+template <int I> struct B : public virtual A {
+  B(char a, unsigned short b, int c) : A(a + b + c), _a(a), _b(b), _c(c) {}
+
+private:
+  char _a;
+  unsigned short : 3;
+  unsigned short _b : 6;
+  unsigned short : 4;
+  int _c;
+};
+#pragma pack(pop)
+
+#pragma pack(push, 16)
+class C : private virtual B<0>, public virtual B<1>, private B<2>, public B<3> {
+public:
+  C(char x, char y, char z)
+      : A(x - y + z), B<0>(x, y, z), B<1>(x * 2, y * 2, z * 2),
+        B<2>(x * 3, y * 3, z * 3), B<3>(x * 4, y * 4, z * 4), _x(x * 5),
+        _y(y * 5), _z(z * 5) {}
+
+  static int abc;
+
+private:
+  int _x;
+  short _y;
+  char _z;
+};
+int C::abc = 123;
+#pragma pack(pop)
+
+class List {
+public:
+  List() = default;
+  List(List *p, List *n, C v) : Prev(p), Next(n), Value(v) {}
+
+private:
+  List *Prev = nullptr;
+  List *Next = nullptr;
+  C Value{1, 2, 3};
+};
+
+int main() {
+  List ls[16];
+  return 0; // break here
+}
+
+#--- commands.input
+
+settings set target.max-children-depth 10
+br set -p "break here"
+run
+target variable
+frame variable
+quit
+
+# CHECK: (int) ::C::abc = 123
+
+# CHECK:      (List[16]) ls = {
+# CHECK:        [15] = {
+# CHECK-NEXT:     Prev = nullptr
+# CHECK-NEXT:     Next = nullptr
+# CHECK-NEXT:     Value = {
+# CHECK-NEXT:       B<2> = {
+# CHECK-NEXT:         A = {
+# CHECK-NEXT:           _u = (_u1 = '\x02', _u2 = 2, _u3 = 2)
+# CHECK-NEXT:         }
+# CHECK-NEXT:         _a = '\x03'
+# CHECK-NEXT:         _b = 6
+# CHECK-NEXT:         _c = 9
+# CHECK-NEXT:       }
+# CHECK-NEXT:       B<3> = {
+# CHECK-NEXT:         A = {
+# CHECK-NEXT:           _u = (_u1 = '\x02', _u2 = 2, _u3 = 2)
+# CHECK-NEXT:         }
+# CHECK-NEXT:         _a = '\x04'
+# CHECK-NEXT:         _b = 8
+# CHECK-NEXT:         _c = 12
+# CHECK-NEXT:       }
+# CHECK-NEXT:       A = {
+# CHECK-NEXT:         _u = (_u1 = '\x02', _u2 = 2, _u3 = 2)
+# CHECK-NEXT:       }
+# CHECK-NEXT:       B<0> = {
+# CHECK-NEXT:         A = {
+# CHECK-NEXT:           _u = (_u1 = '\x02', _u2 = 2, _u3 = 2)
+# CHECK-NEXT:         }
+# CHECK-NEXT:         _a = '\x01'
+# CHECK-NEXT:         _b = 2
+# CHECK-NEXT:         _c = 3
+# CHECK-NEXT:       }
+# CHECK-NEXT:       B<1> = {
+# CHECK-NEXT:         A = {
+# CHECK-NEXT:           _u = (_u1 = '\x02', _u2 = 2, _u3 = 2)
+# CHECK-NEXT:         }
+# CHECK-NEXT:         _a = '\x02'
+# CHECK-NEXT:         _b = 4
+# CHECK-NEXT:         _c = 6
+# CHECK-NEXT:       }
+# CHECK-NEXT:       _x = 5
+# CHECK-NEXT:       _y = 10
+# CHECK-NEXT:       _z = '\x0f'
+# CHECK-NEXT:     }
+# CHECK-NEXT:   }
+# CHECK-NEXT: }
diff --git a/lldb/test/Shell/SymbolFile/PDB/udt-layout.test b/lldb/test/Shell/SymbolFile/PDB/udt-layout.test
index bc68539e25ec1..619646b3f12ba 100644
--- a/lldb/test/Shell/SymbolFile/PDB/udt-layout.test
+++ b/lldb/test/Shell/SymbolFile/PDB/udt-layout.test
@@ -1,4 +1,4 @@
-REQUIRES: target-windows, lld
+REQUIRES: target-windows, lld, diasdk
 RUN: %build --compiler=clang-cl --output=%t.exe %S/Inputs/UdtLayoutTest.cpp
 RUN: %lldb -b -s %S/Inputs/UdtLayoutTest.script -- %t.exe | FileCheck %s
 

>From 29ada4dc993cff2d3fc2020241ce5c977dae1dc7 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Tue, 23 Sep 2025 10:43:29 +0100
Subject: [PATCH 29/42] [PatternMatch] Unify ap(int|float)_match (NFC)
 (#159575)

---
 llvm/include/llvm/IR/PatternMatch.h | 65 ++++++++++-------------------
 1 file changed, 22 insertions(+), 43 deletions(-)

diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index a16776c62f32b..6168e24569f99 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -259,86 +259,65 @@ inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
   return match_combine_and<LTy, RTy>(L, R);
 }
 
-struct apint_match {
-  const APInt *&Res;
+template <typename APTy> struct ap_match {
+  static_assert(std::is_same_v<APTy, APInt> || std::is_same_v<APTy, APFloat>);
+  using ConstantTy =
+      std::conditional_t<std::is_same_v<APTy, APInt>, ConstantInt, ConstantFP>;
+
+  const APTy *&Res;
   bool AllowPoison;
 
-  apint_match(const APInt *&Res, bool AllowPoison)
+  ap_match(const APTy *&Res, bool AllowPoison)
       : Res(Res), AllowPoison(AllowPoison) {}
 
   template <typename ITy> bool match(ITy *V) const {
-    if (auto *CI = dyn_cast<ConstantInt>(V)) {
+    if (auto *CI = dyn_cast<ConstantTy>(V)) {
       Res = &CI->getValue();
       return true;
     }
     if (V->getType()->isVectorTy())
       if (const auto *C = dyn_cast<Constant>(V))
         if (auto *CI =
-                dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowPoison))) {
+                dyn_cast_or_null<ConstantTy>(C->getSplatValue(AllowPoison))) {
           Res = &CI->getValue();
           return true;
         }
     return false;
   }
 };
-// Either constexpr if or renaming ConstantFP::getValueAPF to
-// ConstantFP::getValue is needed to do it via single template
-// function for both apint/apfloat.
-struct apfloat_match {
-  const APFloat *&Res;
-  bool AllowPoison;
-
-  apfloat_match(const APFloat *&Res, bool AllowPoison)
-      : Res(Res), AllowPoison(AllowPoison) {}
-
-  template <typename ITy> bool match(ITy *V) const {
-    if (auto *CI = dyn_cast<ConstantFP>(V)) {
-      Res = &CI->getValueAPF();
-      return true;
-    }
-    if (V->getType()->isVectorTy())
-      if (const auto *C = dyn_cast<Constant>(V))
-        if (auto *CI =
-                dyn_cast_or_null<ConstantFP>(C->getSplatValue(AllowPoison))) {
-          Res = &CI->getValueAPF();
-          return true;
-        }
-    return false;
-  }
-};
 
 /// Match a ConstantInt or splatted ConstantVector, binding the
 /// specified pointer to the contained APInt.
-inline apint_match m_APInt(const APInt *&Res) {
+inline ap_match<APInt> m_APInt(const APInt *&Res) {
   // Forbid poison by default to maintain previous behavior.
-  return apint_match(Res, /* AllowPoison */ false);
+  return ap_match<APInt>(Res, /* AllowPoison */ false);
 }
 
 /// Match APInt while allowing poison in splat vector constants.
-inline apint_match m_APIntAllowPoison(const APInt *&Res) {
-  return apint_match(Res, /* AllowPoison */ true);
+inline ap_match<APInt> m_APIntAllowPoison(const APInt *&Res) {
+  return ap_match<APInt>(Res, /* AllowPoison */ true);
 }
 
 /// Match APInt while forbidding poison in splat vector constants.
-inline apint_match m_APIntForbidPoison(const APInt *&Res) {
-  return apint_match(Res, /* AllowPoison */ false);
+inline ap_match<APInt> m_APIntForbidPoison(const APInt *&Res) {
+  return ap_match<APInt>(Res, /* AllowPoison */ false);
 }
 
 /// Match a ConstantFP or splatted ConstantVector, binding the
 /// specified pointer to the contained APFloat.
-inline apfloat_match m_APFloat(const APFloat *&Res) {
+inline ap_match<APFloat> m_APFloat(const APFloat *&Res) {
   // Forbid undefs by default to maintain previous behavior.
-  return apfloat_match(Res, /* AllowPoison */ false);
+  return ap_match<APFloat>(Res, /* AllowPoison */ false);
 }
 
 /// Match APFloat while allowing poison in splat vector constants.
-inline apfloat_match m_APFloatAllowPoison(const APFloat *&Res) {
-  return apfloat_match(Res, /* AllowPoison */ true);
+inline ap_match<APFloat> m_APFloatAllowPoison(const APFloat *&Res) {
+  return ap_match<APFloat>(Res, /* AllowPoison */ true);
 }
 
 /// Match APFloat while forbidding poison in splat vector constants.
-inline apfloat_match m_APFloatForbidPoison(const APFloat *&Res) {
-  return apfloat_match(Res, /* AllowPoison */ false);
+inline ap_match<APFloat> m_APFloatForbidPoison(const APFloat *&Res) {
+  return ap_match<APFloat>(Res, /* AllowPoison */ false);
 }
 
 template <int64_t Val> struct constantint_match {
@@ -1027,7 +1006,7 @@ struct bind_const_intval_ty {
 
   template <typename ITy> bool match(ITy *V) const {
     const APInt *ConstInt;
-    if (!apint_match(ConstInt, /*AllowPoison=*/false).match(V))
+    if (!ap_match<APInt>(ConstInt, /*AllowPoison=*/false).match(V))
       return false;
     if (ConstInt->getActiveBits() > 64)
       return false;

>From ee1f55b657dfc809a288b929c15dff45bbb68009 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov <barannikov88 at gmail.com>
Date: Tue, 23 Sep 2025 12:44:29 +0300
Subject: [PATCH 30/42] [ARM] Auto-decode s_cc_out operand (#159956)

The operand can be decoded automatically, without the need for
post-decoding instruction modification.
Part of #156540.
---
 llvm/lib/Target/ARM/ARMInstrFormats.td        |  1 +
 .../ARM/Disassembler/ARMDisassembler.cpp      | 41 ++++++-------------
 2 files changed, 14 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMInstrFormats.td b/llvm/lib/Target/ARM/ARMInstrFormats.td
index e50740f7d57c5..1ad2485dce17f 100644
--- a/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -1219,6 +1219,7 @@ class Thumb1sI<dag oops, dag iops, AddrMode am, int sz,
                InstrItinClass itin,
                string opc, string asm, string cstr, list<dag> pattern>
   : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
+  bits<0> s;
   let OutOperandList = !con(oops, (outs s_cc_out:$s));
   let InOperandList = !con(iops, (ins pred:$p));
   let AsmString = !strconcat(opc, "${s}${p}", asm);
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 56112112a0293..b25b7e7104f20 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -119,6 +119,8 @@ class VPTStatus {
 class ARMDisassembler : public MCDisassembler {
 public:
   std::unique_ptr<const MCInstrInfo> MCII;
+  mutable ITStatus ITBlock;
+  mutable VPTStatus VPTBlock;
 
   ARMDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
                   const MCInstrInfo *MCII)
@@ -146,10 +148,6 @@ class ARMDisassembler : public MCDisassembler {
                                    ArrayRef<uint8_t> Bytes, uint64_t Address,
                                    raw_ostream &CStream) const;
 
-  mutable ITStatus ITBlock;
-  mutable VPTStatus VPTBlock;
-
-  void AddThumb1SBit(MCInst &MI, bool InITBlock) const;
   bool isVectorPredicable(const MCInst &MI) const;
   DecodeStatus AddThumbPredicate(MCInst&) const;
   void UpdateThumbPredicate(DecodeStatus &S, MCInst &MI) const;
@@ -636,6 +634,17 @@ static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
   return MCDisassembler::Success;
 }
 
+// This overload is called when decoding the `s_cc_out` operand, which is not
+// encoded in the instruction. It is only used in Thumb1 instructions.
+static DecodeStatus DecodeCCOutOperand(MCInst &Inst,
+                                       const MCDisassembler *Decoder) {
+  const auto *D = static_cast<const ARMDisassembler *>(Decoder);
+  // Thumb1 instructions define CPSR unless they are inside an IT block.
+  MCRegister CCR = D->ITBlock.instrInITBlock() ? ARM::NoRegister : ARM::CPSR;
+  Inst.addOperand(MCOperand::createReg(CCR));
+  return MCDisassembler::Success;
+}
+
 static DecodeStatus DecodeSORegImmOperand(MCInst &Inst, unsigned Val,
                                           uint64_t Address,
                                           const MCDisassembler *Decoder) {
@@ -6130,26 +6139,6 @@ DecodeStatus ARMDisassembler::getARMInstruction(MCInst &MI, uint64_t &Size,
   return MCDisassembler::Fail;
 }
 
-// Thumb1 instructions don't have explicit S bits.  Rather, they
-// implicitly set CPSR.  Since it's not represented in the encoding, the
-// auto-generated decoder won't inject the CPSR operand.  We need to fix
-// that as a post-pass.
-void ARMDisassembler::AddThumb1SBit(MCInst &MI, bool InITBlock) const {
-  const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
-  MCInst::iterator I = MI.begin();
-  for (unsigned i = 0; i < MCID.NumOperands; ++i, ++I) {
-    if (I == MI.end()) break;
-    if (MCID.operands()[i].isOptionalDef() &&
-        MCID.operands()[i].RegClass == ARM::CCRRegClassID) {
-      if (i > 0 && MCID.operands()[i - 1].isPredicate())
-        continue;
-      MI.insert(I,
-                MCOperand::createReg(InITBlock ? ARM::NoRegister : ARM::CPSR));
-      return;
-    }
-  }
-}
-
 bool ARMDisassembler::isVectorPredicable(const MCInst &MI) const {
   const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
@@ -6343,9 +6332,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size,
                              STI);
   if (Result) {
     Size = 2;
-    bool InITBlock = ITBlock.instrInITBlock();
     Check(Result, AddThumbPredicate(MI));
-    AddThumb1SBit(MI, InITBlock);
     return Result;
   }
 
@@ -6411,9 +6398,7 @@ DecodeStatus ARMDisassembler::getThumbInstruction(MCInst &MI, uint64_t &Size,
       decodeInstruction(DecoderTableThumb32, MI, Insn32, Address, this, STI);
   if (Result != MCDisassembler::Fail) {
     Size = 4;
-    bool InITBlock = ITBlock.instrInITBlock();
     Check(Result, AddThumbPredicate(MI));
-    AddThumb1SBit(MI, InITBlock);
     return Result;
   }
 

>From d9684f79bed54f91fb8bf3d459f75434d0425f7d Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.ritter at amd.com>
Date: Tue, 23 Sep 2025 11:52:38 +0200
Subject: [PATCH 31/42] [AMDGPU] Insert waitcnt for non-global fence release in
 GFX12 (#159282)

A fence release could be followed by a barrier, so it should wait for
the relevant memory accesses to complete, even if it is mmra-limited to
LDS. Previously, that wait was skipped for non-global fence releases.

Fixes SWDEV-554932.
---
 llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp  |  76 +++++------
 .../AMDGPU/memory-legalizer-barriers-mmra.ll  | 122 ++++++++++++++++++
 .../memory-legalizer-fence-mmra-local.ll      |  27 ++++
 3 files changed, 187 insertions(+), 38 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers-mmra.ll

diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index c501ebba0c7ed..c85d2bb9fe9ae 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -2514,6 +2514,8 @@ bool SIGfx12CacheControl::insertRelease(MachineBasicBlock::iterator &MI,
                                         SIAtomicAddrSpace AddrSpace,
                                         bool IsCrossAddrSpaceOrdering,
                                         Position Pos) const {
+  bool Changed = false;
+
   MachineBasicBlock &MBB = *MI->getParent();
   DebugLoc DL = MI->getDebugLoc();
 
@@ -2521,53 +2523,51 @@ bool SIGfx12CacheControl::insertRelease(MachineBasicBlock::iterator &MI,
   // writeback as all memory operations by the same thread are
   // sequentially consistent, and no other thread can access scratch
   // memory.
+  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) {
+    if (Pos == Position::AFTER)
+      ++MI;
 
-  // Other address spaces do not have a cache.
-  if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) == SIAtomicAddrSpace::NONE)
-    return false;
-
-  if (Pos == Position::AFTER)
-    ++MI;
-
-  // global_wb is only necessary at system scope for GFX12.0,
-  // they're also necessary at device scope for GFX12.5.
-  //
-  // Emitting it for lower scopes is a slow no-op, so we omit it
-  // for performance.
-  switch (Scope) {
-  case SIAtomicScope::SYSTEM:
-    BuildMI(MBB, MI, DL, TII->get(AMDGPU::GLOBAL_WB))
-        .addImm(AMDGPU::CPol::SCOPE_SYS);
-    break;
-  case SIAtomicScope::AGENT:
-    // TODO DOCS
-    if (ST.hasGFX1250Insts()) {
+    // global_wb is only necessary at system scope for GFX12.0,
+    // they're also necessary at device scope for GFX12.5.
+    //
+    // Emitting it for lower scopes is a slow no-op, so we omit it
+    // for performance.
+    switch (Scope) {
+    case SIAtomicScope::SYSTEM:
       BuildMI(MBB, MI, DL, TII->get(AMDGPU::GLOBAL_WB))
-          .addImm(AMDGPU::CPol::SCOPE_DEV);
+          .addImm(AMDGPU::CPol::SCOPE_SYS);
+      Changed = true;
+      break;
+    case SIAtomicScope::AGENT:
+      // TODO DOCS
+      if (ST.hasGFX1250Insts()) {
+        BuildMI(MBB, MI, DL, TII->get(AMDGPU::GLOBAL_WB))
+            .addImm(AMDGPU::CPol::SCOPE_DEV);
+        Changed = true;
+      }
+      break;
+    case SIAtomicScope::CLUSTER:
+    case SIAtomicScope::WORKGROUP:
+      // No WB necessary, but we still have to wait.
+    case SIAtomicScope::WAVEFRONT:
+    case SIAtomicScope::SINGLETHREAD:
+      // No WB or wait necessary here, but insertWait takes care of that.
+      break;
+    default:
+      llvm_unreachable("Unsupported synchronization scope");
     }
-    break;
-  case SIAtomicScope::CLUSTER:
-  case SIAtomicScope::WORKGROUP:
-    // No WB necessary, but we still have to wait.
-    break;
-  case SIAtomicScope::WAVEFRONT:
-  case SIAtomicScope::SINGLETHREAD:
-    // No WB or wait necessary here.
-    return false;
-  default:
-    llvm_unreachable("Unsupported synchronization scope");
-  }
 
-  if (Pos == Position::AFTER)
-    --MI;
+    if (Pos == Position::AFTER)
+      --MI;
+  }
 
   // We always have to wait for previous memory operations (load/store) to
   // complete, whether we inserted a WB or not. If we inserted a WB (storecnt),
   // we of course need to wait for that as well.
-  insertWait(MI, Scope, AddrSpace, SIMemOp::LOAD | SIMemOp::STORE,
-             IsCrossAddrSpaceOrdering, Pos, AtomicOrdering::Release);
+  Changed |= insertWait(MI, Scope, AddrSpace, SIMemOp::LOAD | SIMemOp::STORE,
+                        IsCrossAddrSpaceOrdering, Pos, AtomicOrdering::Release);
 
-  return true;
+  return Changed;
 }
 
 bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers-mmra.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers-mmra.ll
new file mode 100644
index 0000000000000..1e6dc4e06ef4d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers-mmra.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck --check-prefixes=GFX10-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck --check-prefixes=GFX11-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck --check-prefixes=GFX12-WGP %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GFX1250 %s
+
+
+define float @test_barrier_workgroup_local_mmra(ptr addrspace(3) noundef %x, ptr addrspace(3) noundef %y, float %val) {
+; GFX10-WGP-LABEL: test_barrier_workgroup_local_mmra:
+; GFX10-WGP:       ; %bb.0:
+; GFX10-WGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-WGP-NEXT:    ds_write_b32 v0, v2
+; GFX10-WGP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT:    s_barrier
+; GFX10-WGP-NEXT:    ds_read_b32 v0, v1
+; GFX10-WGP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-WGP-LABEL: test_barrier_workgroup_local_mmra:
+; GFX11-WGP:       ; %bb.0:
+; GFX11-WGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-WGP-NEXT:    ds_store_b32 v0, v2
+; GFX11-WGP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT:    s_barrier
+; GFX11-WGP-NEXT:    ds_load_b32 v0, v1
+; GFX11-WGP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-WGP-LABEL: test_barrier_workgroup_local_mmra:
+; GFX12-WGP:       ; %bb.0:
+; GFX12-WGP-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-WGP-NEXT:    s_wait_expcnt 0x0
+; GFX12-WGP-NEXT:    s_wait_samplecnt 0x0
+; GFX12-WGP-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-WGP-NEXT:    s_wait_kmcnt 0x0
+; GFX12-WGP-NEXT:    ds_store_b32 v0, v2
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
+; GFX12-WGP-NEXT:    s_barrier_signal -1
+; GFX12-WGP-NEXT:    s_barrier_wait -1
+; GFX12-WGP-NEXT:    ds_load_b32 v0, v1
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
+; GFX12-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_barrier_workgroup_local_mmra:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    ds_store_b32 v0, v2
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    s_barrier_signal -1
+; GFX1250-NEXT:    s_barrier_wait -1
+; GFX1250-NEXT:    ds_load_b32 v0, v1
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
+  store float %val, ptr addrspace(3) %x
+  fence syncscope("workgroup") release, !mmra !0
+  tail call void @llvm.amdgcn.s.barrier()
+  fence syncscope("workgroup") acquire, !mmra !0
+  %ret = load float, ptr addrspace(3) %y
+  ret float %ret
+}
+
+define float @test_barrier_workgroup_global_mmra(ptr addrspace(1) noundef %x, ptr addrspace(1) noundef %y, float %val) {
+; GFX10-WGP-LABEL: test_barrier_workgroup_global_mmra:
+; GFX10-WGP:       ; %bb.0:
+; GFX10-WGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-WGP-NEXT:    global_store_dword v[0:1], v4, off
+; GFX10-WGP-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-WGP-NEXT:    s_barrier
+; GFX10-WGP-NEXT:    buffer_gl0_inv
+; GFX10-WGP-NEXT:    global_load_dword v0, v[2:3], off
+; GFX10-WGP-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-WGP-LABEL: test_barrier_workgroup_global_mmra:
+; GFX11-WGP:       ; %bb.0:
+; GFX11-WGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-WGP-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX11-WGP-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX11-WGP-NEXT:    s_barrier
+; GFX11-WGP-NEXT:    buffer_gl0_inv
+; GFX11-WGP-NEXT:    global_load_b32 v0, v[2:3], off
+; GFX11-WGP-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-WGP-LABEL: test_barrier_workgroup_global_mmra:
+; GFX12-WGP:       ; %bb.0:
+; GFX12-WGP-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-WGP-NEXT:    s_wait_expcnt 0x0
+; GFX12-WGP-NEXT:    s_wait_samplecnt 0x0
+; GFX12-WGP-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-WGP-NEXT:    s_wait_kmcnt 0x0
+; GFX12-WGP-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX12-WGP-NEXT:    s_wait_storecnt 0x0
+; GFX12-WGP-NEXT:    s_barrier_signal -1
+; GFX12-WGP-NEXT:    s_barrier_wait -1
+; GFX12-WGP-NEXT:    global_inv scope:SCOPE_SE
+; GFX12-WGP-NEXT:    global_load_b32 v0, v[2:3], off
+; GFX12-WGP-NEXT:    s_wait_loadcnt 0x0
+; GFX12-WGP-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_barrier_workgroup_global_mmra:
+; GFX1250:       ; %bb.0:
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b32 v[0:1], v4, off
+; GFX1250-NEXT:    s_wait_storecnt 0x0
+; GFX1250-NEXT:    s_barrier_signal -1
+; GFX1250-NEXT:    s_barrier_wait -1
+; GFX1250-NEXT:    global_load_b32 v0, v[2:3], off
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
+  store float %val, ptr addrspace(1) %x
+  fence syncscope("workgroup") release, !mmra !1
+  tail call void @llvm.amdgcn.s.barrier()
+  fence syncscope("workgroup") acquire, !mmra !1
+  %ret = load float, ptr addrspace(1) %y
+  ret float %ret
+}
+
+!0 = !{!"amdgpu-synchronize-as", !"local"}
+!1 = !{!"amdgpu-synchronize-as", !"global"}
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll
index cc42428e1aa06..8b0b099999f06 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-local.ll
@@ -143,14 +143,17 @@ define amdgpu_kernel void @workgroup_release_fence() {
 ;
 ; GFX12-WGP-LABEL: workgroup_release_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: workgroup_release_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: workgroup_release_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("workgroup") release, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -213,14 +216,17 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
 ;
 ; GFX12-WGP-LABEL: workgroup_acq_rel_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: workgroup_acq_rel_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: workgroup_acq_rel_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("workgroup") acq_rel, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -283,14 +289,17 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
 ;
 ; GFX12-WGP-LABEL: workgroup_seq_cst_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: workgroup_seq_cst_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: workgroup_seq_cst_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("workgroup") seq_cst, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -670,14 +679,17 @@ define amdgpu_kernel void @agent_release_fence() {
 ;
 ; GFX12-WGP-LABEL: agent_release_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: agent_release_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: agent_release_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("agent") release, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -740,14 +752,17 @@ define amdgpu_kernel void @agent_acq_rel_fence() {
 ;
 ; GFX12-WGP-LABEL: agent_acq_rel_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: agent_acq_rel_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: agent_acq_rel_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("agent") acq_rel, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -810,14 +825,17 @@ define amdgpu_kernel void @agent_seq_cst_fence() {
 ;
 ; GFX12-WGP-LABEL: agent_seq_cst_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: agent_seq_cst_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: agent_seq_cst_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence syncscope("agent") seq_cst, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -1197,14 +1215,17 @@ define amdgpu_kernel void @system_release_fence() {
 ;
 ; GFX12-WGP-LABEL: system_release_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: system_release_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: system_release_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence release, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -1267,14 +1288,17 @@ define amdgpu_kernel void @system_acq_rel_fence() {
 ;
 ; GFX12-WGP-LABEL: system_acq_rel_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: system_acq_rel_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: system_acq_rel_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence acq_rel, !mmra !{!"amdgpu-synchronize-as", !"local"}
@@ -1337,14 +1361,17 @@ define amdgpu_kernel void @system_seq_cst_fence() {
 ;
 ; GFX12-WGP-LABEL: system_seq_cst_fence:
 ; GFX12-WGP:       ; %bb.0: ; %entry
+; GFX12-WGP-NEXT:    s_wait_dscnt 0x0
 ; GFX12-WGP-NEXT:    s_endpgm
 ;
 ; GFX12-CU-LABEL: system_seq_cst_fence:
 ; GFX12-CU:       ; %bb.0: ; %entry
+; GFX12-CU-NEXT:    s_wait_dscnt 0x0
 ; GFX12-CU-NEXT:    s_endpgm
 ;
 ; GFX1250-LABEL: system_seq_cst_fence:
 ; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_dscnt 0x0
 ; GFX1250-NEXT:    s_endpgm
 entry:
   fence seq_cst, !mmra !{!"amdgpu-synchronize-as", !"local"}

>From 22d96c635776f8373615d798caa8b12bbe650dbc Mon Sep 17 00:00:00 2001
From: Ebin-McW <ebin.jose at multicorewareinc.com>
Date: Tue, 23 Sep 2025 15:34:02 +0530
Subject: [PATCH 32/42] [SPIRV] Added Packed Vector Format for
 SPV_KHR_integer_dot_product (#160088)

Added Packed Vector Format PackedVectorFormat4x8Bit and modified
existing test file.
---
 .../SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp   | 15 ++++++++++++++
 llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp       |  2 +-
 .../lib/Target/SPIRV/SPIRVSymbolicOperands.td | 20 +++++++++++++++++++
 ..._KHR_integer_dot_product_OCLtoSPIRV_int.ll | 18 ++++++++---------
 4 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
index 1e3f7fc0070ff..776208bd3e693 100644
--- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
@@ -269,6 +269,21 @@ void SPIRVInstPrinter::printInst(const MCInst *MI, uint64_t Address,
           }
           break;
         }
+        case SPIRV::OpSDot:
+        case SPIRV::OpUDot:
+        case SPIRV::OpSUDot:
+        case SPIRV::OpSDotAccSat:
+        case SPIRV::OpUDotAccSat:
+        case SPIRV::OpSUDotAccSat: {
+          const unsigned NumOps = MI->getNumOperands();
+          if (NumOps > NumFixedOps) {
+            OS << ' ';
+            printSymbolicOperand<OperandCategory::PackedVectorFormatsOperand>(
+                MI, NumOps - 1, OS);
+            break;
+          }
+          break;
+        }
         default:
           printRemainingVariableOps(MI, NumFixedOps, OS);
           break;
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 2abd9d36f7606..86f445954400e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1778,7 +1778,7 @@ static bool generateDotOrFMulInst(const StringRef DemangledCall,
   // Add Packed Vector Format for Integer dot product builtins if arguments are
   // scalar
   if (!IsVec && OC != SPIRV::OpFMulS)
-    MIB.addImm(0);
+    MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
 
   return true;
 }
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 501bcb94af2ea..66ce5a2d67c3e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -211,6 +211,7 @@ def CooperativeMatrixOperandsOperand : OperandCategory;
 def SpecConstantOpOperandsOperand : OperandCategory;
 def MatrixMultiplyAccumulateOperandsOperand : OperandCategory;
 def FPEncodingOperand : OperandCategory;
+def PackedVectorFormatsOperand : OperandCategory;
 
 //===----------------------------------------------------------------------===//
 // Definition of the Environments
@@ -2026,3 +2027,22 @@ multiclass FPEncodingOperand<bits<32> value, list<Extension> reqExtensions>{
 }
 
 defm BFloat16KHR : FPEncodingOperand<0, [SPV_KHR_bfloat16]>;
+
+def PackedVectorFormats : GenericEnum, Operand<i32> {
+  let FilterClass = "PackedVectorFormats";
+  let NameField = "Name";
+  let ValueField = "Value";
+  let PrintMethod = !strconcat("printSymbolicOperand<OperandCategory::", FilterClass, "Operand>");
+}
+
+class PackedVectorFormats<string name, bits<32> value> {
+  string Name = name;
+  bits<32> Value = value;
+}
+
+multiclass PackedVectorFormatsOperand<bits<32> value, list<Extension> reqExtensions> {
+  def NAME : BuiltIn<NAME, value>;
+  defm : SymbolicOperandWithRequirements<PackedVectorFormatsOperand, value, NAME, 0, 0, reqExtensions, [], []>;
+}
+
+defm PackedVectorFormat4x8Bit : PackedVectorFormatsOperand<0, [SPV_KHR_integer_dot_product]>;
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_integer_dot_product/SPV_KHR_integer_dot_product_OCLtoSPIRV_int.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_integer_dot_product/SPV_KHR_integer_dot_product_OCLtoSPIRV_int.ll
index 284f5c34671b7..52ddc39265442 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_integer_dot_product/SPV_KHR_integer_dot_product_OCLtoSPIRV_int.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_integer_dot_product/SPV_KHR_integer_dot_product_OCLtoSPIRV_int.ll
@@ -13,15 +13,15 @@
 ; CHECK: Name %[[#SignedB:]] "ib"
 ; CHECK: Name %[[#UnsignedB:]] "ub"
 
-; CHECK: SDot %[[#]] %[[#SignedA]] %[[#SignedB]] 0
-; CHECK: SUDot %[[#]] %[[#SignedA]] %[[#UnsignedB]] 0
-; CHECK: SUDot %[[#]] %[[#SignedB]] %[[#UnsignedA]] 0
-; CHECK: UDot %[[#]] %[[#UnsignedA]] %[[#UnsignedB]] 0
-
-; CHECK: SDotAccSat %[[#]] %[[#SignedA]] %[[#SignedB]] %[[#]] 0
-; CHECK: SUDotAccSat %[[#]] %[[#SignedA]] %[[#UnsignedB]] %[[#]] 0
-; CHECK: SUDotAccSat %[[#]] %[[#SignedB]] %[[#UnsignedA]] %[[#]] 0
-; CHECK: UDotAccSat %[[#]] %[[#UnsignedA]] %[[#UnsignedB]] %[[#]] 0
+; CHECK: SDot %[[#]] %[[#SignedA]] %[[#SignedB]] PackedVectorFormat4x8Bit
+; CHECK: SUDot %[[#]] %[[#SignedA]] %[[#UnsignedB]] PackedVectorFormat4x8Bit
+; CHECK: SUDot %[[#]] %[[#SignedB]] %[[#UnsignedA]] PackedVectorFormat4x8Bit
+; CHECK: UDot %[[#]] %[[#UnsignedA]] %[[#UnsignedB]] PackedVectorFormat4x8Bit
+
+; CHECK: SDotAccSat %[[#]] %[[#SignedA]] %[[#SignedB]] %[[#]] PackedVectorFormat4x8Bit
+; CHECK: SUDotAccSat %[[#]] %[[#SignedA]] %[[#UnsignedB]] %[[#]] PackedVectorFormat4x8Bit
+; CHECK: SUDotAccSat %[[#]] %[[#SignedB]] %[[#UnsignedA]] %[[#]] PackedVectorFormat4x8Bit
+; CHECK: UDotAccSat %[[#]] %[[#UnsignedA]] %[[#UnsignedB]] %[[#]] PackedVectorFormat4x8Bit
 
 define spir_kernel void @test(i32 %ia, i32 %ua, i32 %ib, i32 %ub, i32 %ires, i32 %ures) {
 entry:

>From 28dcee41be1f11940c568d7b3532dbc0da2919bb Mon Sep 17 00:00:00 2001
From: Mary Kassayova <mary.kassayova at arm.com>
Date: Tue, 23 Sep 2025 11:05:47 +0100
Subject: [PATCH 33/42] [Sema][AArch64] Emit error for mismatched VLs on
 streaming mode transitions (#159131)

Update Sema::checkCall to handle the case where a call involves a
streaming mode transition and passes or returns scalable vector types.
Previously, Clang always issued a warning in this case, noting that the
streaming and non-streaming vector lengths may differ at runtime. With
this change:
- If both `-msve-vector-bits` and `-msve-streaming-vector-bits` are
specified and produce different fixed VL values, Clang now emits an
error rather than a warning, as illustrated in the sketch after this
list
- If either flag is missing or the vector lengths are equal, the
diagnostic remains a warning
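
A minimal reproducer sketch (not part of this patch; the function names
are hypothetical, SVE/SME target features are assumed, and the flags
`-msve-vector-bits=128 -msve-streaming-vector-bits=256` stand in for one
mismatching combination):

  void callee(__SVInt8_t v) __arm_streaming;   // streaming callee

  void caller(__SVInt8_t v) {                  // non-streaming caller
    // error: passing a VL-dependent argument to a function with a
    // different streaming-mode is undefined behaviour because the
    // streaming vector length (256 bit) and non-streaming vector
    // length (128 bit) differ
    callee(v);
  }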
---
 .../clang/Basic/DiagnosticSemaKinds.td        |   8 ++
 clang/lib/Sema/SemaChecking.cpp               |  28 ++++-
 ...h64-sme-streaming-nonstreaming-vl-checks.c | 111 ++++++++++++++++++
 3 files changed, 143 insertions(+), 4 deletions(-)
 create mode 100644 clang/test/Sema/aarch64-sme-streaming-nonstreaming-vl-checks.c

diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index bd896524321d1..dd82c3b092eb5 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -3983,6 +3983,14 @@ def warn_sme_locally_streaming_has_vl_args_returns : Warning<
   "%select{returning|passing}0 a VL-dependent argument %select{from|to}0 a locally streaming function is undefined"
   " behaviour when the streaming and non-streaming vector lengths are different at runtime">,
   InGroup<AArch64SMEAttributes>, DefaultIgnore;
+def warn_sme_streaming_compatible_vl_mismatch : Warning<
+  "%select{returning|passing}0 a VL-dependent argument %select{from|to}0 a %select{non-streaming|streaming}1"
+  " function is undefined behaviour when the streaming-compatible caller is%select{| not}1 in streaming"
+  " mode, because the streaming vector length (%2 bit) and non-streaming vector length (%3 bit) differ">,
+  InGroup<AArch64SMEAttributes>, DefaultIgnore;
+def err_sme_streaming_transition_vl_mismatch : Error<
+  "%select{returning|passing}0 a VL-dependent argument %select{from|to}0 a function with a different"
+  " streaming-mode is undefined behaviour because the streaming vector length (%1 bit) and non-streaming vector length (%2 bit) differ">;
 def err_conflicting_attributes_arm_agnostic : Error<
   "__arm_agnostic(\"sme_za_state\") cannot share ZA state with its caller">;
 def err_conflicting_attributes_arm_state : Error<
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 740b472b0eb16..b3b67230f7687 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3855,6 +3855,8 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
     // If the call requires a streaming-mode change and has scalable vector
     // arguments or return values, then warn the user that the streaming and
     // non-streaming vector lengths may be different.
+    // When both streaming and non-streaming vector lengths are defined and
+    // mismatched, produce an error.
     const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext);
     if (CallerFD && (!FD || !FD->getBuiltinID()) &&
         (IsScalableArg || IsScalableRet)) {
@@ -3867,12 +3869,30 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
       if (!IsCalleeStreamingCompatible &&
           (CallerFnType == SemaARM::ArmStreamingCompatible ||
            ((CallerFnType == SemaARM::ArmStreaming) ^ IsCalleeStreaming))) {
+        const LangOptions &LO = getLangOpts();
+        unsigned VL = LO.VScaleMin * 128;
+        unsigned SVL = LO.VScaleStreamingMin * 128;
+        bool IsVLMismatch = VL && SVL && VL != SVL;
+
+        auto EmitDiag = [&](bool IsArg) {
+          if (IsVLMismatch) {
+            if (CallerFnType == SemaARM::ArmStreamingCompatible)
+              // Emit warning for streaming-compatible callers
+              Diag(Loc, diag::warn_sme_streaming_compatible_vl_mismatch)
+                  << IsArg << IsCalleeStreaming << SVL << VL;
+            else
+              // Emit error otherwise
+              Diag(Loc, diag::err_sme_streaming_transition_vl_mismatch)
+                  << IsArg << SVL << VL;
+          } else
+            Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
+                << IsArg;
+        };
+
         if (IsScalableArg)
-          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
-              << /*IsArg=*/true;
+          EmitDiag(true);
         if (IsScalableRet)
-          Diag(Loc, diag::warn_sme_streaming_pass_return_vl_to_non_streaming)
-              << /*IsArg=*/false;
+          EmitDiag(false);
       }
     }
 
diff --git a/clang/test/Sema/aarch64-sme-streaming-nonstreaming-vl-checks.c b/clang/test/Sema/aarch64-sme-streaming-nonstreaming-vl-checks.c
new file mode 100644
index 0000000000000..41d89869062ff
--- /dev/null
+++ b/clang/test/Sema/aarch64-sme-streaming-nonstreaming-vl-checks.c
@@ -0,0 +1,111 @@
+// Case 1: No vscale flags — should only produce warnings
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve -Waarch64-sme-attributes -fsyntax-only -verify=expected-noflags %s
+
+// Case 2: Explicit mismatch in vscale flags — should produce errors for 
+// streaming and non-streaming callers
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +bf16 -target-feature +sme -target-feature +sme2 -target-feature +sve -Waarch64-sme-attributes -fsyntax-only -mvscale-min=1 -mvscale-max=1 -mvscale-streaming-min=2 -mvscale-streaming-max=2 -verify=expected-flags %s
+
+void sme_streaming_with_vl_arg(__SVInt8_t a) __arm_streaming;
+
+__SVInt8_t sme_streaming_returns_vl(void) __arm_streaming;
+
+void sme_streaming_compatible_with_vl_arg(__SVInt8_t a) __arm_streaming_compatible;
+
+__SVInt8_t sme_streaming_compatible_returns_vl(void) __arm_streaming_compatible;
+
+void sme_no_streaming_with_vl_arg(__SVInt8_t a);
+
+__SVInt8_t sme_no_streaming_returns_vl(void);
+
+
+void sme_no_streaming_calling_streaming_with_vl_args() {
+  __SVInt8_t a;
+  // expected-noflags-warning at +2 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-error at +1 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  sme_streaming_with_vl_arg(a);
+}
+
+void sme_no_streaming_calling_streaming_with_return_vl() {
+  // expected-noflags-warning at +2 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-error at +1 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  __SVInt8_t r = sme_streaming_returns_vl();
+}
+
+void sme_streaming_calling_non_streaming_with_vl_args(void) __arm_streaming {
+  __SVInt8_t a;
+  // expected-noflags-warning at +2 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-error at +1 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  sme_no_streaming_with_vl_arg(a);
+}
+
+void sme_streaming_calling_non_streaming_with_return_vl(void) __arm_streaming {
+  // expected-noflags-warning at +2 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-error at +1 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  __SVInt8_t r = sme_no_streaming_returns_vl();
+}
+
+void sme_streaming_compatible_calling_streaming_with_vl_args(__SVInt8_t arg) __arm_streaming_compatible {
+  // expected-noflags-warning at +2 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-warning at +1 {{passing a VL-dependent argument to a streaming function is undefined behaviour when the streaming-compatible caller is not in streaming mode, because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  sme_streaming_with_vl_arg(arg);
+}
+
+void sme_streaming_compatible_calling_sme_streaming_return_vl(void) __arm_streaming_compatible {
+  // expected-noflags-warning at +2 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-warning at +1 {{returning a VL-dependent argument from a streaming function is undefined behaviour when the streaming-compatible caller is not in streaming mode, because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  __SVInt8_t r = sme_streaming_returns_vl();
+}
+
+void sme_streaming_compatible_calling_no_streaming_with_vl_args(__SVInt8_t arg) __arm_streaming_compatible {
+  // expected-noflags-warning at +2 {{passing a VL-dependent argument to a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-warning at +1 {{passing a VL-dependent argument to a non-streaming function is undefined behaviour when the streaming-compatible caller is in streaming mode, because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  sme_no_streaming_with_vl_arg(arg);
+}
+
+void sme_streaming_compatible_calling_no_sme_streaming_return_vl(void) __arm_streaming_compatible {
+  // expected-noflags-warning at +2 {{returning a VL-dependent argument from a function with a different streaming-mode is undefined behaviour when the streaming and non-streaming vector lengths are different at runtime}}
+  // expected-flags-warning at +1 {{returning a VL-dependent argument from a non-streaming function is undefined behaviour when the streaming-compatible caller is in streaming mode, because the streaming vector length (256 bit) and non-streaming vector length (128 bit) differ}}
+  __SVInt8_t r = sme_no_streaming_returns_vl();
+}
+
+void sme_streaming_calling_streaming_with_vl_args(__SVInt8_t a) __arm_streaming {
+  sme_streaming_with_vl_arg(a);
+}
+
+void sme_streaming_calling_streaming_with_return_vl(void) __arm_streaming {
+  __SVInt8_t r = sme_streaming_returns_vl();
+}
+
+void sme_streaming_calling_streaming_compatible_with_vl_args(__SVInt8_t a) __arm_streaming {
+  sme_streaming_compatible_with_vl_arg(a);
+}
+
+void sme_streaming_calling_streaming_compatible_with_return_vl(void) __arm_streaming {
+  __SVInt8_t r = sme_streaming_compatible_returns_vl();
+}
+
+void sme_no_streaming_calling_streaming_compatible_with_vl_args() {
+  __SVInt8_t a;
+  sme_streaming_compatible_with_vl_arg(a);
+}
+
+void sme_no_streaming_calling_streaming_compatible_with_return_vl() {
+  __SVInt8_t r = sme_streaming_compatible_returns_vl();
+}
+
+void sme_no_streaming_calling_non_streaming_with_vl_args() {
+  __SVInt8_t a;
+  sme_no_streaming_with_vl_arg(a);
+}
+
+void sme_no_streaming_calling_non_streaming_with_return_vl() {
+  __SVInt8_t r = sme_no_streaming_returns_vl();
+}
+
+void sme_streaming_compatible_calling_streaming_compatible_with_vl_args(__SVInt8_t arg) __arm_streaming_compatible {
+  sme_streaming_compatible_with_vl_arg(arg);
+}
+
+void sme_streaming_compatible_calling_streaming_compatible_with_return_vl(void) __arm_streaming_compatible {
+  __SVInt8_t r = sme_streaming_compatible_returns_vl();
+}

>From 2d5ea0923ffd1b780c028610b5664fe76ddc43e5 Mon Sep 17 00:00:00 2001
From: Tom Eccles <tom.eccles at arm.com>
Date: Tue, 23 Sep 2025 11:09:13 +0100
Subject: [PATCH 34/42] [OMPIRBuilder] Don't outline DISTRIBUTE on CPUs
 (#158317)

We use different OpenMP runtime functions on CPU and target offload. The
one used for DISTRIBUTE on target offload needs a function pointer to an
offloaded function, but the one on CPU doesn't. This caused unnecessary
overhead on CPUs because SHARED or FIRSTPRIVATE memory from the
surrounding context had to be packaged into a context structure just for
an ordinary function call (which would hopefully eventually get
inlined). This also made the IR harder to read.
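
A host-side sketch for illustration (not part of this change; `do_work`
is a hypothetical helper): previously the distribute body below was
extracted into its own outlined function, with `shared` packed into a
context struct; it is now emitted directly in the surrounding outlined
teams function.

  void do_work(int i, float x);   // hypothetical helper

  void sketch(float shared) {
    #pragma omp teams distribute
    for (int i = 0; i < 128; ++i)
      do_work(i, shared); // no separate "distribute" outline on the host
  }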
---
 llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp     | 14 +++++++-----
 ...penmp-cancel-distribute-parallel-loop.mlir |  4 ++--
 .../LLVMIR/openmp-distribute-private.mlir     | 22 +++++--------------
 mlir/test/Target/LLVMIR/openmp-llvm.mlir      | 10 +--------
 .../LLVMIR/openmp-target-generic-spmd.mlir    |  3 ---
 .../Target/LLVMIR/openmp-target-spmd.mlir     |  3 ---
 ...nmp-teams-distribute-parallel-do-simd.mlir |  8 +++----
 7 files changed, 20 insertions(+), 44 deletions(-)

diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 6d948f184392d..9b67465faab0b 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -10036,12 +10036,16 @@ OpenMPIRBuilder::createDistribute(const LocationDescription &Loc,
   if (Error Err = BodyGenCB(AllocaIP, CodeGenIP))
     return Err;
 
-  OutlineInfo OI;
-  OI.OuterAllocaBB = OuterAllocaIP.getBlock();
-  OI.EntryBB = AllocaBB;
-  OI.ExitBB = ExitBB;
+  // When using target we use different runtime functions which require a
+  // callback.
+  if (Config.isTargetDevice()) {
+    OutlineInfo OI;
+    OI.OuterAllocaBB = OuterAllocaIP.getBlock();
+    OI.EntryBB = AllocaBB;
+    OI.ExitBB = ExitBB;
 
-  addOutlineInfo(std::move(OI));
+    addOutlineInfo(std::move(OI));
+  }
   Builder.SetInsertPoint(ExitBB, ExitBB->begin());
 
   return Builder.saveIP();
diff --git a/mlir/test/Target/LLVMIR/openmp-cancel-distribute-parallel-loop.mlir b/mlir/test/Target/LLVMIR/openmp-cancel-distribute-parallel-loop.mlir
index 2339022be8979..b91c97738f87f 100644
--- a/mlir/test/Target/LLVMIR/openmp-cancel-distribute-parallel-loop.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-cancel-distribute-parallel-loop.mlir
@@ -32,7 +32,7 @@ llvm.func @cancel_distribute_parallel_do(%lb : i32, %ub : i32, %step : i32) {
 // CHECK:       omp.region.cont6:
 // CHECK:         br label %omp.region.cont4
 // CHECK:       omp.region.cont4:
-// CHECK:         br label %distribute.exit.exitStub
+// CHECK:         br label %omp.par.exit.exitStub
 // CHECK:       omp_loop.body:
 // CHECK:         %[[VAL_111:.*]] = add i32 %{{.*}}, %{{.*}}
 // CHECK:         %[[VAL_112:.*]] = mul i32 %[[VAL_111]], %{{.*}}
@@ -52,6 +52,6 @@ llvm.func @cancel_distribute_parallel_do(%lb : i32, %ub : i32, %step : i32) {
 // CHECK:       omp_loop.inc:
 // CHECK:         %[[VAL_100:.*]] = add nuw i32 %{{.*}}, 1
 // CHECK:         br label %omp_loop.header
-// CHECK:       distribute.exit.exitStub:
+// CHECK:       omp.par.exit.exitStub:
 // CHECK:         ret void
 
diff --git a/mlir/test/Target/LLVMIR/openmp-distribute-private.mlir b/mlir/test/Target/LLVMIR/openmp-distribute-private.mlir
index 188c12ebfd3c7..ef118e0ad1df2 100644
--- a/mlir/test/Target/LLVMIR/openmp-distribute-private.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-distribute-private.mlir
@@ -34,11 +34,6 @@ llvm.func @_QQmain() {
 // CHECK:       }
 
 // CHECK:       define internal void @[[TEAMS_FUNC]]({{.*}}) {
-// CHECK:         call void @[[DIST_FUNC:.*]]()
-// CHECK-NEXT:    br label %distribute.exit
-// CHECK:       }
-
-// CHECK:       define internal void @[[DIST_FUNC]]() {
 // CHECK:         %[[PRIV_VAR_ALLOC:.*]] = alloca float, align 4
 // CHECK:         %[[IV_ALLOC:.*]] = alloca i32, align 4
 
@@ -78,29 +73,22 @@ llvm.func @_QQmain() {
 
 // CHECK-LABEL: define void @_QQmain() {
 // CHECK:         %[[SHARED_VAR_ALLOC:.*]] = alloca float, i64 1, align 4
-// CHECK:         %[[SHARED_VAR_PTR:.*]] = getelementptr { ptr }, ptr %[[DIST_PARAM:.*]], i32 0, i32 0
-// CHECK:         store ptr %[[SHARED_VAR_ALLOC]], ptr %[[SHARED_VAR_PTR]], align 8
-// CHECK:         call void @[[DIST_FUNC:.*]](ptr %[[DIST_PARAM]])
-// CHECK-NEXT:    br label %distribute.exit
-// CHECK:       }
 
-// CHECK:       define internal void @[[DIST_FUNC]](ptr %[[DIST_ARG:.*]]) {
-// CHECK:         %[[SHARED_VAR_GEP:.*]] = getelementptr { ptr }, ptr %[[DIST_ARG]], i32 0, i32 0
-// CHECK:         %[[SHARED_VAR_PTR2:.*]] = load ptr, ptr %[[SHARED_VAR_GEP]], align 8
+// CHECK:       distribute.alloca:
 // CHECK:         %[[PRIV_VAR_ALLOC:.*]] = alloca float, align 4
 
 // CHECK:       omp.private.copy:
-// CHECK-NEXT:    %[[SHARED_VAR_VAL:.*]] = load float, ptr %[[SHARED_VAR_PTR2]], align 4
+// CHECK-NEXT:    %[[SHARED_VAR_VAL:.*]] = load float, ptr %[[SHARED_VAR_ALLOC]], align 4
 // CHECK-NEXT:    store float %[[SHARED_VAR_VAL]], ptr %[[PRIV_VAR_ALLOC]], align 4
 
+// CHECK:       omp.loop_nest.region:
+// CHECK-NEXT:    store float 0x40091EB860000000, ptr %[[PRIV_VAR_ALLOC]], align 4
+
 // CHECK:       omp_loop.after:
 // CHECK-NEXT:    br label %omp.region.cont
 
 // CHECK:       omp.region.cont:
 // CHECK-NEXT:   call void @foo_free(ptr %[[PRIV_VAR_ALLOC]])
-
-// CHECK:       omp.loop_nest.region:
-// CHECK-NEXT:    store float 0x40091EB860000000, ptr %[[PRIV_VAR_ALLOC]], align 4
 // CHECK:       }
 
 
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 27210bc0890ce..8bd33a382197e 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -3339,12 +3339,6 @@ llvm.func @distribute() {
 }
 
 // CHECK-LABEL: define void @distribute
-// CHECK:         call void @[[OUTLINED:.*]]({{.*}})
-// CHECK-NEXT:    br label %[[EXIT:.*]]
-// CHECK:       [[EXIT]]:
-// CHECK:         ret void
-
-// CHECK:       define internal void @[[OUTLINED]]({{.*}})
 // CHECK:         %[[LASTITER:.*]] = alloca i32
 // CHECK:         %[[LB:.*]] = alloca i64
 // CHECK:         %[[UB:.*]] = alloca i64
@@ -3381,9 +3375,7 @@ llvm.func @distribute_wsloop(%lb : i32, %ub : i32, %step : i32) {
 // CHECK:         call void{{.*}}@__kmpc_fork_call({{.*}}, ptr @[[OUTLINED_PARALLEL:.*]],
 
 // CHECK:       define internal void @[[OUTLINED_PARALLEL]]
-// CHECK:         call void @[[OUTLINED_DISTRIBUTE:.*]]({{.*}})
-
-// CHECK:       define internal void @[[OUTLINED_DISTRIBUTE]]
+// CHECK:       distribute.alloca:
 // CHECK:         %[[LASTITER:.*]] = alloca i32
 // CHECK:         %[[LB:.*]] = alloca i32
 // CHECK:         %[[UB:.*]] = alloca i32
diff --git a/mlir/test/Target/LLVMIR/openmp-target-generic-spmd.mlir b/mlir/test/Target/LLVMIR/openmp-target-generic-spmd.mlir
index 9bb2b40a43def..504d91b1f6198 100644
--- a/mlir/test/Target/LLVMIR/openmp-target-generic-spmd.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-target-generic-spmd.mlir
@@ -49,9 +49,6 @@ module attributes {omp.is_target_device = false, omp.target_triples = ["amdgcn-a
 // HOST:         call void{{.*}}@__kmpc_fork_teams({{.*}}, ptr @[[TEAMS_OUTLINE:.*]], {{.*}})
 
 // HOST:       define internal void @[[TEAMS_OUTLINE]]
-// HOST:         call void @[[DISTRIBUTE_OUTLINE:.*]]({{.*}})
-
-// HOST:       define internal void @[[DISTRIBUTE_OUTLINE]]
 // HOST:         call void @__kmpc_for_static_init{{.*}}(ptr {{.*}}, i32 {{.*}}, i32 92, ptr {{.*}}, ptr {{.*}}, ptr {{.*}}, ptr {{.*}}, i32 {{.*}}, i32 {{.*}})
 // HOST:         call void (ptr, i32, ptr, ...) @__kmpc_fork_call({{.*}}, ptr @[[PARALLEL_OUTLINE:.*]], {{.*}})
 
diff --git a/mlir/test/Target/LLVMIR/openmp-target-spmd.mlir b/mlir/test/Target/LLVMIR/openmp-target-spmd.mlir
index 86dff678bf639..20202fc7fc16c 100644
--- a/mlir/test/Target/LLVMIR/openmp-target-spmd.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-target-spmd.mlir
@@ -46,9 +46,6 @@ module attributes {omp.is_target_device = false, omp.target_triples = ["amdgcn-a
 // HOST:         call void{{.*}}@__kmpc_fork_call({{.*}}, ptr @[[PARALLEL_OUTLINE:.*]], {{.*}})
 
 // HOST:       define internal void @[[PARALLEL_OUTLINE]]
-// HOST:         call void @[[DISTRIBUTE_OUTLINE:.*]]({{.*}})
-
-// HOST:       define internal void @[[DISTRIBUTE_OUTLINE]]
 // HOST:         call void @__kmpc_dist_for_static_init{{.*}}(ptr {{.*}}, i32 {{.*}}, i32 34, ptr {{.*}}, ptr {{.*}}, ptr {{.*}}, ptr {{.*}}, ptr {{.*}}, i32 {{.*}}, i32 {{.*}})
 
 //--- device.mlir
diff --git a/mlir/test/Target/LLVMIR/openmp-teams-distribute-parallel-do-simd.mlir b/mlir/test/Target/LLVMIR/openmp-teams-distribute-parallel-do-simd.mlir
index 4d766cc1ac4f4..69d5d225d0515 100644
--- a/mlir/test/Target/LLVMIR/openmp-teams-distribute-parallel-do-simd.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-teams-distribute-parallel-do-simd.mlir
@@ -3,22 +3,20 @@
 // Check that omp.simd as a leaf of a composite construct still generates
 // the appropriate loop vectorization attribute.
 
-// CHECK-LABEL: define internal void @test_teams_distribute_parallel_do_simd..omp_par.2
+// CHECK-LABEL: define internal void @test_teams_distribute_parallel_do_simd..omp_par.1
 // CHECK: teams.body:
 // CHECK: omp.teams.region:
 
-// CHECK-LABEL: define internal void @test_teams_distribute_parallel_do_simd..omp_par.1
+// CHECK-LABEL: define internal void @test_teams_distribute_parallel_do_simd..omp_par
 // CHECK: omp.par.entry:
 // CHECK: omp.par.region:
-// CHECK: distribute.exit:
-
-// CHECK-LABEL: define internal void @test_teams_distribute_parallel_do_simd..omp_par
 // CHECK: distribute.body:
 // CHECK: omp.distribute.region:
 // CHECK: omp_loop.header:
 // CHECK: omp_loop.inc:
 // CHECK-NEXT:   %omp_loop.next = add nuw i32 %omp_loop.iv, 1
 // CHECK-NEXT:   br label %omp_loop.header, !llvm.loop ![[LOOP_ATTR:.*]]
+// CHECK: omp.par.exit.exitStub:
 
 // CHECK: ![[LOOP_ATTR]] = distinct !{![[LOOP_ATTR]], ![[LPAR:.*]], ![[LVEC:.*]]}
 // CHECK: ![[LPAR]] = !{!"llvm.loop.parallel_accesses", ![[PAR_ACC:.*]]}

>From fc63a086b4a2234f6a90e95a8140047240634d9b Mon Sep 17 00:00:00 2001
From: nerix <nerixdev at outlook.de>
Date: Tue, 23 Sep 2025 12:14:59 +0200
Subject: [PATCH 35/42] [LLDB][PDB] Warn if DIA plugin is requested but not
 available (#160067)

If LLDB was built without the DIA SDK and the DIA reader is explicitly
requested (through `LLDB_USE_NATIVE_PDB_READER=0` or `settings set
plugin.symbol-file.pdb.reader dia`), LLDB should print a warning,
because it will use the native reader in any case
(https://github.com/llvm/llvm-project/pull/159769#discussion_r2367316980).

This PR adds the warning and a test for the case where LLDB is built
without the SDK on Windows. I don't think any builder runs this
configuration, as there are still five failing tests in it. I tested
this locally with and without the SDK.
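
An illustrative session (binary name hypothetical, output trimmed),
assuming an LLDB built without the DIA SDK, matching the expectations in
the new NativePDB test:

  $ env LLDB_USE_NATIVE_PDB_READER=0 lldb a.exe -o 'target modules dump symfile'
  warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead.
  (lldb) target modules dump symfile
  Dumping debug symbols for 1 modules.
  SymbolFile native-pdb ...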
---
 .../Plugins/SymbolFile/PDB/SymbolFilePDB.cpp  | 51 +++++++------
 .../SymbolFile/NativePDB/native-setting.cpp   | 71 +++++++++++++++++++
 .../Shell/SymbolFile/PDB/native-setting.cpp   | 31 ++++++--
 3 files changed, 126 insertions(+), 27 deletions(-)
 create mode 100644 lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp

diff --git a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp
index 0e2ca1784e7e9..9f4eb1c21711d 100644
--- a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp
+++ b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp
@@ -14,6 +14,7 @@
 #include "clang/Lex/Lexer.h"
 
 #include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
+#include "lldb/Core/Debugger.h"
 #include "lldb/Core/Mangled.h"
 #include "lldb/Core/Module.h"
 #include "lldb/Core/PluginManager.h"
@@ -105,24 +106,21 @@ enum {
 #include "SymbolFilePDBPropertiesEnum.inc"
 };
 
-#if LLVM_ENABLE_DIA_SDK && defined(_WIN32)
-bool ShouldUseNativeReaderByDefault() {
-  static bool g_use_native_by_default = true;
-
-  static llvm::once_flag g_initialize;
-  llvm::call_once(g_initialize, [] {
-    llvm::StringRef env_value = ::getenv("LLDB_USE_NATIVE_PDB_READER");
-    if (!env_value.equals_insensitive("on") &&
-        !env_value.equals_insensitive("yes") &&
-        !env_value.equals_insensitive("1") &&
-        !env_value.equals_insensitive("true"))
-      g_use_native_by_default = false;
-  });
-
-  return g_use_native_by_default;
-}
+static const bool g_should_use_native_reader_by_default = [] {
+  llvm::StringRef env_value = ::getenv("LLDB_USE_NATIVE_PDB_READER");
+
+#if !LLVM_ENABLE_DIA_SDK || !defined(_WIN32)
+  // if the environment value is unset, the native reader is requested
+  if (env_value.empty())
+    return true;
 #endif
 
+  return env_value.equals_insensitive("on") ||
+         env_value.equals_insensitive("yes") ||
+         env_value.equals_insensitive("1") ||
+         env_value.equals_insensitive("true");
+}();
+
 class PluginProperties : public Properties {
 public:
   static llvm::StringRef GetSettingName() {
@@ -136,6 +134,21 @@ class PluginProperties : public Properties {
 
   bool UseNativeReader() const {
 #if LLVM_ENABLE_DIA_SDK && defined(_WIN32)
+    return IsNativeReaderRequested();
+#else
+    if (!IsNativeReaderRequested()) {
+      static std::once_flag g_warning_shown;
+      Debugger::ReportWarning(
+          "The DIA PDB reader was explicitly requested, but LLDB was built "
+          "without the DIA SDK. The native reader will be used instead.",
+          {}, &g_warning_shown);
+    }
+    return true;
+#endif
+  }
+
+private:
+  bool IsNativeReaderRequested() const {
     auto value =
         GetPropertyAtIndexAs<PDBReader>(ePropertyReader, ePDBReaderDefault);
     switch (value) {
@@ -144,12 +157,8 @@ class PluginProperties : public Properties {
     case ePDBReaderDIA:
       return false;
     default:
-    case ePDBReaderDefault:
-      return ShouldUseNativeReaderByDefault();
+      return g_should_use_native_reader_by_default;
     }
-#else
-    return true;
-#endif
   }
 };
 
diff --git a/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp b/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp
new file mode 100644
index 0000000000000..41ddba746b4ac
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp
@@ -0,0 +1,71 @@
+// REQUIRES: !diasdk, target-windows
+
+// Test plugin.symbol-file.pdb.reader setting without the DIA SDK
+// RUN: %build -o %t.exe -- %s
+// RUN: env -u LLDB_USE_NATIVE_PDB_READER %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=NO-ENV %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER= %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=NO-ENV %s
+
+// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV1 %s
+
+// RUN: env LLDB_USE_NATIVE_PDB_READER=foo %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=42 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=-1 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+
+// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb \
+// RUN:     -o 'settings set plugin.symbol-file.pdb.reader dia' \
+// RUN:     -o 'target create %t.exe' \
+// RUN:     -o 'target modules dump symfile' \
+// RUN:     2>&1 | FileCheck --check-prefix=ENV0-SET-DIA %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb \
+// RUN:     -o 'settings set plugin.symbol-file.pdb.reader dia' \
+// RUN:     -o 'target create %t.exe' \
+// RUN:     -o 'target modules dump symfile' \
+// RUN:     2>&1 | FileCheck --check-prefix=ENV1-SET-DIA %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb \
+// RUN:     -o 'settings set plugin.symbol-file.pdb.reader native' \
+// RUN:     -o 'target create %t.exe' \
+// RUN:     -o 'target modules dump symfile' \
+// RUN:     2>&1 | FileCheck --check-prefix=ENV0-SET-NATIVE %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb \
+// RUN:     -o 'settings set plugin.symbol-file.pdb.reader native' \
+// RUN:     -o 'target create %t.exe' \
+// RUN:     -o 'target modules dump symfile' \
+// RUN:     2>&1 | FileCheck --check-prefix=ENV1-SET-NATIVE %s
+
+// NO-ENV-NOT: warning:
+// NO-ENV: (lldb) target modules dump symfile
+// NO-ENV: Dumping debug symbols for 1 modules.
+// NO-ENV: SymbolFile native-pdb
+
+// ENV0: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead.
+// ENV0: (lldb) target modules dump symfile
+// ENV0: Dumping debug symbols for 1 modules.
+// ENV0: SymbolFile native-pdb
+
+// ENV1-NOT: warning:
+// ENV1: (lldb) target modules dump symfile
+// ENV1: Dumping debug symbols for 1 modules.
+// ENV1: SymbolFile native-pdb
+
+// ENV0-SET-DIA: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead.
+// ENV0-SET-DIA: (lldb) target modules dump symfile
+// ENV0-SET-DIA: Dumping debug symbols for 1 modules.
+// ENV0-SET-DIA: SymbolFile native-pdb
+
+// ENV1-SET-DIA: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead.
+// ENV1-SET-DIA: (lldb) target modules dump symfile
+// ENV1-SET-DIA: Dumping debug symbols for 1 modules.
+// ENV1-SET-DIA: SymbolFile native-pdb
+
+// ENV0-SET-NATIVE-NOT: warning:
+// ENV0-SET-NATIVE: (lldb) target modules dump symfile
+// ENV0-SET-NATIVE: Dumping debug symbols for 1 modules.
+// ENV0-SET-NATIVE: SymbolFile native-pdb
+
+// ENV1-SET-NATIVE-NOT: warning:
+// ENV1-SET-NATIVE: (lldb) target modules dump symfile
+// ENV1-SET-NATIVE: Dumping debug symbols for 1 modules.
+// ENV1-SET-NATIVE: SymbolFile native-pdb
+
+int main() {}
diff --git a/lldb/test/Shell/SymbolFile/PDB/native-setting.cpp b/lldb/test/Shell/SymbolFile/PDB/native-setting.cpp
index a3077252f08f1..f5e54592b0b31 100644
--- a/lldb/test/Shell/SymbolFile/PDB/native-setting.cpp
+++ b/lldb/test/Shell/SymbolFile/PDB/native-setting.cpp
@@ -2,49 +2,68 @@
 
 // Test plugin.symbol-file.pdb.reader setting
 // RUN: %build -o %t.exe -- %s
-// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb %t.exe -o 'target modules dump symfile' | FileCheck --check-prefix=ENV0 %s
-// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb %t.exe -o 'target modules dump symfile' | FileCheck --check-prefix=ENV1 %s
+// RUN: env -u LLDB_USE_NATIVE_PDB_READER %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=NO-ENV %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER= %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=NO-ENV %s
+
+// RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV1 %s
+
+// RUN: env LLDB_USE_NATIVE_PDB_READER=foo %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=42 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+// RUN: env LLDB_USE_NATIVE_PDB_READER=-1 %lldb %t.exe -o 'target modules dump symfile' 2>&1 | FileCheck --check-prefix=ENV0 %s
+
 // RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb \
 // RUN:     -o 'settings set plugin.symbol-file.pdb.reader dia' \
 // RUN:     -o 'target create %t.exe' \
 // RUN:     -o 'target modules dump symfile' \
-// RUN:     | FileCheck --check-prefix=ENV0-SET-DIA %s
+// RUN:     2>&1 | FileCheck --check-prefix=ENV0-SET-DIA %s
 // RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb \
 // RUN:     -o 'settings set plugin.symbol-file.pdb.reader dia' \
 // RUN:     -o 'target create %t.exe' \
 // RUN:     -o 'target modules dump symfile' \
-// RUN:     | FileCheck --check-prefix=ENV1-SET-DIA %s
+// RUN:     2>&1 | FileCheck --check-prefix=ENV1-SET-DIA %s
 // RUN: env LLDB_USE_NATIVE_PDB_READER=0 %lldb \
 // RUN:     -o 'settings set plugin.symbol-file.pdb.reader native' \
 // RUN:     -o 'target create %t.exe' \
 // RUN:     -o 'target modules dump symfile' \
-// RUN:     | FileCheck --check-prefix=ENV0-SET-NATIVE %s
+// RUN:     2>&1 | FileCheck --check-prefix=ENV0-SET-NATIVE %s
 // RUN: env LLDB_USE_NATIVE_PDB_READER=1 %lldb \
 // RUN:     -o 'settings set plugin.symbol-file.pdb.reader native' \
 // RUN:     -o 'target create %t.exe' \
 // RUN:     -o 'target modules dump symfile' \
-// RUN:     | FileCheck --check-prefix=ENV1-SET-NATIVE %s
+// RUN:     2>&1 | FileCheck --check-prefix=ENV1-SET-NATIVE %s
+
+// NO-ENV-NOT: warning:
+// NO-ENV: (lldb) target modules dump symfile
+// NO-ENV: Dumping debug symbols for 1 modules.
+// NO-ENV: SymbolFile pdb
 
+// ENV0-NOT: warning:
 // ENV0: (lldb) target modules dump symfile
 // ENV0: Dumping debug symbols for 1 modules.
 // ENV0: SymbolFile pdb
 
+// ENV1-NOT: warning:
 // ENV1: (lldb) target modules dump symfile
 // ENV1: Dumping debug symbols for 1 modules.
 // ENV1: SymbolFile native-pdb
 
+// ENV0-SET-DIA-NOT: warning:
 // ENV0-SET-DIA: (lldb) target modules dump symfile
 // ENV0-SET-DIA: Dumping debug symbols for 1 modules.
 // ENV0-SET-DIA: SymbolFile pdb
 
+// ENV1-SET-DIA-NOT: warning:
 // ENV1-SET-DIA: (lldb) target modules dump symfile
 // ENV1-SET-DIA: Dumping debug symbols for 1 modules.
 // ENV1-SET-DIA: SymbolFile pdb
 
+// ENV0-SET-NATIVE-NOT: warning:
 // ENV0-SET-NATIVE: (lldb) target modules dump symfile
 // ENV0-SET-NATIVE: Dumping debug symbols for 1 modules.
 // ENV0-SET-NATIVE: SymbolFile native-pdb
 
+// ENV1-SET-NATIVE-NOT: warning:
 // ENV1-SET-NATIVE: (lldb) target modules dump symfile
 // ENV1-SET-NATIVE: Dumping debug symbols for 1 modules.
 // ENV1-SET-NATIVE: SymbolFile native-pdb

>From cdafb13b798f257cd7ba7e26fc89b58ed7a80d1b Mon Sep 17 00:00:00 2001
From: fennecJ <hwahwa649 at gmail.com>
Date: Tue, 23 Sep 2025 18:31:11 +0800
Subject: [PATCH 36/42] [Headers][X86] Allow basic AVX512 predicate ops to be
 used in constexpr (#159998)

Fixes #158646
---
 clang/include/clang/Basic/BuiltinsX86.td   | 40 +++++------
 clang/lib/AST/ByteCode/InterpBuiltin.cpp   | 64 ++++++++++++++++++
 clang/lib/AST/ExprConstant.cpp             | 73 ++++++++++++++++++++
 clang/lib/Headers/avx512bwintrin.h         | 62 ++++++++---------
 clang/lib/Headers/avx512dqintrin.h         | 40 +++++------
 clang/lib/Headers/avx512fintrin.h          | 40 +++++------
 clang/test/CodeGen/X86/avx512bw-builtins.c | 78 ++++++++++++++++++++++
 clang/test/CodeGen/X86/avx512dq-builtins.c | 45 +++++++++++++
 clang/test/CodeGen/X86/avx512f-builtins.c  | 70 +++++++++++++++++++
 9 files changed, 410 insertions(+), 102 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 044c755d4d7cf..b80f733066b65 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -1219,15 +1219,15 @@ let Features = "avx512f", Attributes = [NoThrow, RequiredVectorWidth<512>] in {
   def scatterdiv16si : X86Builtin<"void(void *, unsigned char, _Vector<8, long long int>, _Vector<8, int>, _Constant int)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def knotqi : X86Builtin<"unsigned char(unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def knothi : X86Builtin<"unsigned short(unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def knotsi : X86Builtin<"unsigned int(unsigned int)">;
   def knotdi : X86Builtin<"unsigned long long int(unsigned long long int)">;
 }
@@ -3076,51 +3076,51 @@ let Features = "avx512dq", Attributes = [NoThrow, Const, RequiredVectorWidth<128
   def fpclassss_mask : X86Builtin<"unsigned char(_Vector<4, float>, _Constant int, unsigned char)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def kaddqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
   def kaddhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def kaddsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kadddi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def kandqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def kandhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def kandsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kanddi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def kandnqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def kandnhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def kandnsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kandndi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def korqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def korhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def korsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kordi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
@@ -3160,28 +3160,28 @@ let Features = "avx512f", Attributes = [NoThrow, Const] in {
   def kunpckhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def kxnorqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def kxnorhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def kxnorsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kxnordi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
 
-let Features = "avx512dq", Attributes = [NoThrow, Const] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr] in {
   def kxorqi : X86Builtin<"unsigned char(unsigned char, unsigned char)">;
 }
 
-let Features = "avx512f", Attributes = [NoThrow, Const] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr] in {
   def kxorhi : X86Builtin<"unsigned short(unsigned short, unsigned short)">;
 }
 
-let Features = "avx512bw", Attributes = [NoThrow, Const] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
   def kxorsi : X86Builtin<"unsigned int(unsigned int, unsigned int)">;
   def kxordi : X86Builtin<"unsigned long long int(unsigned long long int, unsigned long long int)">;
 }
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 77729a5d67c87..4b259dab000b1 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -666,6 +666,16 @@ static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static bool interp__builtin_knot(InterpState &S, CodePtr OpPC,
+                                 const InterpFrame *Frame,
+                                 const CallExpr *Call) {
+  APSInt Val =
+      popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)->getType()));
+  APInt Result = ~Val;
+  pushInteger(S, APSInt(std::move(Result), true), Call->getType());
+  return true;
+}
+
 static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
@@ -3607,6 +3617,60 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case X86::BI__builtin_ia32_selectpd_512:
     return interp__builtin_select(S, OpPC, Call);
 
+  case X86::BI__builtin_ia32_kandqi:
+  case X86::BI__builtin_ia32_kandhi:
+  case X86::BI__builtin_ia32_kandsi:
+  case X86::BI__builtin_ia32_kanddi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
+
+  case X86::BI__builtin_ia32_kandnqi:
+  case X86::BI__builtin_ia32_kandnhi:
+  case X86::BI__builtin_ia32_kandnsi:
+  case X86::BI__builtin_ia32_kandndi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
+
+  case X86::BI__builtin_ia32_korqi:
+  case X86::BI__builtin_ia32_korhi:
+  case X86::BI__builtin_ia32_korsi:
+  case X86::BI__builtin_ia32_kordi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
+
+  case X86::BI__builtin_ia32_kxnorqi:
+  case X86::BI__builtin_ia32_kxnorhi:
+  case X86::BI__builtin_ia32_kxnorsi:
+  case X86::BI__builtin_ia32_kxnordi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
+
+  case X86::BI__builtin_ia32_kxorqi:
+  case X86::BI__builtin_ia32_kxorhi:
+  case X86::BI__builtin_ia32_kxorsi:
+  case X86::BI__builtin_ia32_kxordi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
+
+  case X86::BI__builtin_ia32_knotqi:
+  case X86::BI__builtin_ia32_knothi:
+  case X86::BI__builtin_ia32_knotsi:
+  case X86::BI__builtin_ia32_knotdi:
+    return interp__builtin_knot(S, OpPC, Frame, Call);
+
+  case X86::BI__builtin_ia32_kaddqi:
+  case X86::BI__builtin_ia32_kaddhi:
+  case X86::BI__builtin_ia32_kaddsi:
+  case X86::BI__builtin_ia32_kadddi:
+    return interp__builtin_elementwise_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+
   case Builtin::BI__builtin_elementwise_fshl:
     return interp__builtin_elementwise_triop(S, OpPC, Call,
                                              llvm::APIntOps::fshl);
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 3b9ca82910033..d10e2afeb2341 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -13588,6 +13588,20 @@ static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
 
 bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
                                             unsigned BuiltinOp) {
+
+  auto HandleMaskBinOp =
+      [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn)
+      -> bool {
+    APValue LHS, RHS;
+    if (!Evaluate(LHS, Info, E->getArg(0)) ||
+        !Evaluate(RHS, Info, E->getArg(1)))
+      return false;
+
+    APSInt ResultInt = Fn(LHS.getInt(), RHS.getInt());
+
+    return Success(APValue(ResultInt), E);
+  };
+
   switch (BuiltinOp) {
   default:
     return false;
@@ -14687,6 +14701,65 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
         Result.setBitVal(P++, Val[I]);
     return Success(Result, E);
   }
+
+  case X86::BI__builtin_ia32_kandqi:
+  case X86::BI__builtin_ia32_kandhi:
+  case X86::BI__builtin_ia32_kandsi:
+  case X86::BI__builtin_ia32_kanddi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
+  }
+
+  case X86::BI__builtin_ia32_kandnqi:
+  case X86::BI__builtin_ia32_kandnhi:
+  case X86::BI__builtin_ia32_kandnsi:
+  case X86::BI__builtin_ia32_kandndi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
+  }
+
+  case X86::BI__builtin_ia32_korqi:
+  case X86::BI__builtin_ia32_korhi:
+  case X86::BI__builtin_ia32_korsi:
+  case X86::BI__builtin_ia32_kordi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
+  }
+
+  case X86::BI__builtin_ia32_kxnorqi:
+  case X86::BI__builtin_ia32_kxnorhi:
+  case X86::BI__builtin_ia32_kxnorsi:
+  case X86::BI__builtin_ia32_kxnordi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
+  }
+
+  case X86::BI__builtin_ia32_kxorqi:
+  case X86::BI__builtin_ia32_kxorhi:
+  case X86::BI__builtin_ia32_kxorsi:
+  case X86::BI__builtin_ia32_kxordi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
+  }
+
+  case X86::BI__builtin_ia32_knotqi:
+  case X86::BI__builtin_ia32_knothi:
+  case X86::BI__builtin_ia32_knotsi:
+  case X86::BI__builtin_ia32_knotdi: {
+    APSInt Val;
+    if (!EvaluateInteger(E->getArg(0), Val, Info))
+      return false;
+    APSInt Result = ~Val;
+    return Success(APValue(Result), E);
+  }
+
+  case X86::BI__builtin_ia32_kaddqi:
+  case X86::BI__builtin_ia32_kaddhi:
+  case X86::BI__builtin_ia32_kaddsi:
+  case X86::BI__builtin_ia32_kadddi: {
+    return HandleMaskBinOp(
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+  }
   }
 }
 
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index cf3d98d512684..8d80e3ec2911a 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -32,68 +32,63 @@ typedef unsigned long long __mmask64;
 #define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
 #endif
 
-static __inline __mmask32 __DEFAULT_FN_ATTRS
-_knot_mask32(__mmask32 __M)
-{
+static __inline __mmask32
+    __DEFAULT_FN_ATTRS_CONSTEXPR _knot_mask32(__mmask32 __M) {
   return __builtin_ia32_knotsi(__M);
 }
 
-static __inline __mmask64 __DEFAULT_FN_ATTRS _knot_mask64(__mmask64 __M) {
+static __inline __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_knot_mask64(__mmask64 __M) {
   return __builtin_ia32_knotdi(__M);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kand_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kand_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kand_mask64(__mmask64 __A,
-                                                            __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kand_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kandn_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kandn_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kandn_mask64(__mmask64 __A,
-                                                             __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kandn_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kor_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kor_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kor_mask64(__mmask64 __A,
-                                                           __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kor_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxnor_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxnor_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxnor_mask64(__mmask64 __A,
-                                                             __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxnor_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kxor_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxor_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxor_mask64(__mmask64 __A,
-                                                            __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxor_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
 }
 
@@ -165,14 +160,13 @@ _ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
   return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
 }
 
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_kadd_mask32(__mmask32 __A, __mmask32 __B)
-{
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kadd_mask32(__mmask32 __A, __mmask32 __B) {
   return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
 }
 
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kadd_mask64(__mmask64 __A,
-                                                            __mmask64 __B) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kadd_mask64(__mmask64 __A, __mmask64 __B) {
   return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
 }
 
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 95fdc2851cb9b..fb65bf933b8ad 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -29,39 +29,33 @@
 #define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
 #endif
 
-static __inline __mmask8 __DEFAULT_FN_ATTRS
-_knot_mask8(__mmask8 __M)
-{
+static __inline __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_knot_mask8(__mmask8 __M) {
   return __builtin_ia32_knotqi(__M);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kand_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kand_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kandn_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kandn_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kor_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kor_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kxnor_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxnor_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kxor_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kxor_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B);
 }
 
@@ -119,15 +113,13 @@ _ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
   return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
 }
 
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
-_kadd_mask8(__mmask8 __A, __mmask8 __B)
-{
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kadd_mask8(__mmask8 __A, __mmask8 __B) {
   return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_kadd_mask16(__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_kadd_mask16(__mmask16 __A, __mmask16 __B) {
   return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B);
 }
 
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 8dc556beccbcf..80e58425cdd71 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -520,15 +520,13 @@ _mm512_castsi512_si256(__m512i __A) {
   return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_int2mask(int __a)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_int2mask(int __a) {
   return (__mmask16)__a;
 }
 
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask2int(__mmask16 __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_mask2int(__mmask16 __a) {
   return (int)__a;
 }
 
@@ -4394,9 +4392,8 @@ _mm512_store_epi64 (void *__P, __m512i __A)
 
 /* Mask ops */
 
-static __inline __mmask16 __DEFAULT_FN_ATTRS
-_mm512_knot(__mmask16 __M)
-{
+static __inline __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_knot(__mmask16 __M) {
   return __builtin_ia32_knothi(__M);
 }
 
@@ -8085,21 +8082,18 @@ _mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
 
 #define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kand (__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_kand(__mmask16 __A, __mmask16 __B) {
   return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kandn (__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_kandn(__mmask16 __A, __mmask16 __B) {
   return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kor (__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_kor(__mmask16 __A, __mmask16 __B) {
   return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
 }
 
@@ -8139,15 +8133,13 @@ _mm512_kunpackb (__mmask16 __A, __mmask16 __B)
   return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxnor (__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_kxnor(__mmask16 __A, __mmask16 __B) {
   return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
 }
 
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxor (__mmask16 __A, __mmask16 __B)
-{
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm512_kxor(__mmask16 __A, __mmask16 __B) {
   return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
 }
 
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 57d90cfdd9ccf..1875e202b0c0a 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -19,6 +19,9 @@ __mmask32 test_knot_mask32(__mmask32 a) {
   return _knot_mask32(a);
 }
 
+TEST_CONSTEXPR(_knot_mask32(0) == 0xFFFFFFFF);
+TEST_CONSTEXPR(_knot_mask32(0x123456789) == 0xDCBA9876);
+
 __mmask64 test_knot_mask64(__mmask64 a) {
   // CHECK-LABEL: test_knot_mask64
   // CHECK: [[IN:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -26,6 +29,9 @@ __mmask64 test_knot_mask64(__mmask64 a) {
   return _knot_mask64(a);
 }
 
+TEST_CONSTEXPR(_knot_mask64(0) == 0xFFFFFFFFFFFFFFFF);
+TEST_CONSTEXPR(_knot_mask64(0xABCDEF0123456789) == 0x543210FEDCBA9876);
+
 __mmask32 test_kand_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kand_mask32
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -36,6 +42,12 @@ __mmask32 test_kand_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kand_mask32(0xCCCCCCCC, 0xAAAAAAAA) == 0x88888888);
+TEST_CONSTEXPR(_kand_mask32(0x123456789, 0xFFFFFFFF) == 0x23456789);
+TEST_CONSTEXPR(_kand_mask32(0xABCDEF01, 0x00000000) == 0x00000000);
+TEST_CONSTEXPR(_kand_mask32(0x56789ABC, 0xFFFFFFFF) == 0x56789ABC);
+TEST_CONSTEXPR(_kand_mask32(0xAAAAAAAA, 0x55555555) == 0x00000000);
+
 __mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kand_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -46,6 +58,12 @@ __mmask64 test_kand_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kand_mask64(0xCCCCCCCCCCCCCCCC, 0xAAAAAAAAAAAAAAAA) == 0x8888888888888888);
+TEST_CONSTEXPR(_kand_mask64(0x123456789ABCDEF0, 0xFFFFFFFFFFFFFFFF) == 0x123456789ABCDEF0);
+TEST_CONSTEXPR(_kand_mask64(0xABCDEF0123456789, 0x0000000000000000) == 0x0000000000000000);
+TEST_CONSTEXPR(_kand_mask64(0x56789ABCDEF01234, 0xFFFFFFFFFFFFFFFF) == 0x56789ABCDEF01234);
+TEST_CONSTEXPR(_kand_mask64(0xAAAAAAAAAAAAAAAA, 0x5555555555555555) == 0x0000000000000000);
+
 __mmask32 test_kandn_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kandn_mask32
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -57,6 +75,12 @@ __mmask32 test_kandn_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                      __E, __F);
 }
 
+TEST_CONSTEXPR(_kandn_mask32(0xA0A0F0F0, 0xCCCCCCCC) == 0x4C4C0C0C);
+TEST_CONSTEXPR(_kandn_mask32(0x123456789, 0xFFFFFFFF) == 0xDCBA9876);
+TEST_CONSTEXPR(_kandn_mask32(0x00000000, 0x1234ABCD) == 0x1234ABCD);
+TEST_CONSTEXPR(_kandn_mask32(0xFFFFFFFF, 0x87654321) == 0x00000000);
+TEST_CONSTEXPR(_kandn_mask32(0xAAAAAAAA, 0xAAAAAAAA) == 0x00000000);
+
 __mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kandn_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -68,6 +92,12 @@ __mmask64 test_kandn_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kandn_mask64(0xA0A0F0F0C3C33C3C, 0xCCCCCCCCFFFF0000) == 0x4C4C0C0C3C3C0000);
+TEST_CONSTEXPR(_kandn_mask64(0x0123456789ABCDEF, 0xFFFFFFFFFFFFFFFF) == 0xFEDCBA9876543210);
+TEST_CONSTEXPR(_kandn_mask64(0x0, 0x1122334455667788) == 0x1122334455667788);
+TEST_CONSTEXPR(_kandn_mask64(0xFFFFFFFFFFFFFFFF, 0x8877665544332211) == 0x0);
+TEST_CONSTEXPR(_kandn_mask64(0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA) == 0x0);
+
 __mmask32 test_kor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kor_mask32
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -78,6 +108,12 @@ __mmask32 test_kor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kor_mask32(0xF0F0A5A5, 0x0F0F5A5A) == 0xFFFFFFFF);
+TEST_CONSTEXPR(_kor_mask32(0x12345ABCD, 0x12345ABCD) == 0x2345ABCD);
+TEST_CONSTEXPR(_kor_mask32(0x1A2B3C4D, 0x00000000) == 0x1A2B3C4D);
+TEST_CONSTEXPR(_kor_mask32(0xDEADBEEF, 0xFFFFFFFF) == 0xFFFFFFFF);
+TEST_CONSTEXPR(_kor_mask32(0xAAAAAAAA, 0x55555555) == 0xFFFFFFFF);
+
 __mmask64 test_kor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kor_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -88,6 +124,12 @@ __mmask64 test_kor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                   __E, __F);
 }
 
+TEST_CONSTEXPR(_kor_mask64(0xF0A5C33C00FF11EE, 0x0F5AC33CFF00EE11) == 0xFFFFC33CFFFFFFFF);
+TEST_CONSTEXPR(_kor_mask64(0x123456789ABCDEF0, 0x123456789ABCDEF0) == 0x123456789ABCDEF0);
+TEST_CONSTEXPR(_kor_mask64(0x1122334455667788, 0x0) == 0x1122334455667788);
+TEST_CONSTEXPR(_kor_mask64(0x8877665544332211, 0xFFFFFFFFFFFFFFFF) == 0xFFFFFFFFFFFFFFFF);
+TEST_CONSTEXPR(_kor_mask64(0xAAAAAAAAAAAAAAAA, 0x5555555555555555) == 0xFFFFFFFFFFFFFFFF);
+
 __mmask32 test_kxnor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxnor_mask32
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -99,6 +141,12 @@ __mmask32 test_kxnor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                      __E, __F);
 }
 
+TEST_CONSTEXPR(_kxnor_mask32(0x1234ABCD, 0xFFFF0000) == 0x12345432);
+TEST_CONSTEXPR(_kxnor_mask32(0x123456789ABCDEF0, 0xFFFFFFFF) == 0x9ABCDEF0);
+TEST_CONSTEXPR(_kxnor_mask32(0xAABBCCDD, 0x00000000) == 0x55443322);
+TEST_CONSTEXPR(_kxnor_mask32(0x87654321, 0xFFFFFFFF) == 0x87654321);
+TEST_CONSTEXPR(_kxnor_mask32(0xAAAAAAAA, 0x55555555) == 0x00000000);
+
 __mmask64 test_kxnor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxnor_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -110,6 +158,12 @@ __mmask64 test_kxnor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kxnor_mask64(0x0123456789ABCDEF, 0xFFFFFFFF00000000) == 0x0123456776543210);
+TEST_CONSTEXPR(_kxnor_mask64(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F) == 0xFFFFFFFFFFFFFFFF);
+TEST_CONSTEXPR(_kxnor_mask64(0xFEDCBA9876543210, 0xFFFFFFFFFFFFFFFF) == 0xFEDCBA9876543210);
+TEST_CONSTEXPR(_kxnor_mask64(0xAABBCCDD11223344, 0x0000000000000000) == 0x55443322EEDDCCBB);
+TEST_CONSTEXPR(_kxnor_mask64(0xAAAAAAAAAAAAAAAA, 0x5555555555555555) == 0x0000000000000000);
+
 __mmask32 test_kxor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxor_mask32
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -120,6 +174,12 @@ __mmask32 test_kxor_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kxor_mask32(0x1234ABCD, 0xFFFF0000) == 0xEDCBABCD);
+TEST_CONSTEXPR(_kxor_mask32(0x123456789ABCDEF0, 0x00000000) == 0x9ABCDEF0);
+TEST_CONSTEXPR(_kxor_mask32(0xAABBCCDD, 0x00000000) == 0xAABBCCDD);
+TEST_CONSTEXPR(_kxor_mask32(0x87654321, 0xFFFFFFFF) == 0x789ABCDE);
+TEST_CONSTEXPR(_kxor_mask32(0xAAAAAAAA, 0x55555555) == 0xFFFFFFFF);
+
 __mmask64 test_kxor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxor_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -130,6 +190,12 @@ __mmask64 test_kxor_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kxor_mask64(0x0123456789ABCDEF, 0xFFFFFFFF00000000) == 0xFEDCBA9889ABCDEF);
+TEST_CONSTEXPR(_kxor_mask64(0xF0F0F0F0F0F0F0F0, 0x0F0F0F0F0F0F0F0F) == 0xFFFFFFFFFFFFFFFF);
+TEST_CONSTEXPR(_kxor_mask64(0xFEDCBA9876543210, 0xFFFFFFFFFFFFFFFF) == 0x0123456789ABCDEF);
+TEST_CONSTEXPR(_kxor_mask64(0xAABBCCDD11223344, 0x0000000000000000) == 0xAABBCCDD11223344);
+TEST_CONSTEXPR(_kxor_mask64(0xAAAAAAAAAAAAAAAA, 0x5555555555555555) == 0xFFFFFFFFFFFFFFFF);
+
 unsigned char test_kortestz_mask32_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
   // CHECK-LABEL: test_kortestz_mask32_u8
   // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
@@ -300,6 +366,12 @@ __mmask32 test_kadd_mask32(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kadd_mask32(100000, 200000) == 300000);
+TEST_CONSTEXPR(_kadd_mask32(2147483648, 0) == 2147483648);
+TEST_CONSTEXPR(_kadd_mask32(0xFFFFFFFF, 1) == 0);
+TEST_CONSTEXPR(_kadd_mask32(0xEE6B2800, 0x1DCD6500) == 0x0C388D00);
+TEST_CONSTEXPR(_kadd_mask32(0xFFFFFFFA, 10) == 4);
+
 __mmask64 test_kadd_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kadd_mask64
   // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
@@ -310,6 +382,12 @@ __mmask64 test_kadd_mask64(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kadd_mask64(10000000000, 20000000000) == 30000000000);
+TEST_CONSTEXPR(_kadd_mask64(0x8000000000000000, 0) == 0x8000000000000000);
+TEST_CONSTEXPR(_kadd_mask64(0xFFFFFFFFFFFFFFFF, 1) == 0);
+TEST_CONSTEXPR(_kadd_mask64(0xFFFFFFFFFFFFFFFA, 10) == 4);
+TEST_CONSTEXPR(_kadd_mask64(0xFA0A1F2C6C729C00, 0x0DE0B6B3A7640000) == 0x07EAD5E013D69C00);
+
 __mmask32 test_kshiftli_mask32(__m512i A, __m512i B, __m512i C, __m512i D) {
   // CHECK-LABEL: test_kshiftli_mask32
   // CHECK: [[VAL:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index a7c11eb655628..4112561216af8 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -19,6 +19,9 @@ __mmask8 test_knot_mask8(__mmask8 a) {
   return _knot_mask8(a);
 }
 
+TEST_CONSTEXPR(_knot_mask8(0) == 0xFF);
+TEST_CONSTEXPR(_knot_mask8(0x345) == 0xBA);
+
 __mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kand_mask8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -29,6 +32,12 @@ __mmask8 test_kand_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kand_mask8(0x0C, 0x0A) == 0x08);
+TEST_CONSTEXPR(_kand_mask8(0x123, 0xFF) == 0x23);
+TEST_CONSTEXPR(_kand_mask8(0xAB, 0x00) == 0x00);
+TEST_CONSTEXPR(_kand_mask8(0x56, 0xFF) == 0x56);
+TEST_CONSTEXPR(_kand_mask8(0xAA, 0x55) == 0x00);
+
 __mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kandn_mask8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -40,6 +49,12 @@ __mmask8 test_kandn_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kandn_mask8(0xC3, 0xA5) == 0x24);
+TEST_CONSTEXPR(_kandn_mask8(0x1F0, 0xFF) == 0x0F);
+TEST_CONSTEXPR(_kandn_mask8(0x00, 0xB7) == 0xB7);
+TEST_CONSTEXPR(_kandn_mask8(0xFF, 0x7E) == 0x00);
+TEST_CONSTEXPR(_kandn_mask8(0x55, 0x55) == 0x00);
+
 __mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kor_mask8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -50,6 +65,12 @@ __mmask8 test_kor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m5
                                                   __E, __F);
 }
 
+TEST_CONSTEXPR(_kor_mask8(0xB3, 0x6C) == 0xFF);
+TEST_CONSTEXPR(_kor_mask8(0x1A5, 0x1A5) == 0xA5);
+TEST_CONSTEXPR(_kor_mask8(0xDE, 0x00) == 0xDE);
+TEST_CONSTEXPR(_kor_mask8(0x42, 0xFF) == 0xFF);
+TEST_CONSTEXPR(_kor_mask8(0xAA, 0x55) == 0xFF);
+
 __mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxnor_mask8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -61,6 +82,12 @@ __mmask8 test_kxnor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kxnor_mask8(0xC5, 0xAF) == 0x95);
+TEST_CONSTEXPR(_kxnor_mask8(0x1234, 0xFF) == 0x34);
+TEST_CONSTEXPR(_kxnor_mask8(0x3A, 0x00) == 0xC5);
+TEST_CONSTEXPR(_kxnor_mask8(0xB4, 0xFF) == 0xB4);
+TEST_CONSTEXPR(_kxnor_mask8(0xAA, 0x55) == 0x00);
+
 __mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxor_mask8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -71,6 +98,12 @@ __mmask8 test_kxor_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kxor_mask8(0xC5, 0xAF) == 0x6A);
+TEST_CONSTEXPR(_kxor_mask8(0x1234, 0xFFFF) == 0xCB);
+TEST_CONSTEXPR(_kxor_mask8(0xCD, 0x00) == 0xCD);
+TEST_CONSTEXPR(_kxor_mask8(0x78, 0xFF) == 0x87);
+TEST_CONSTEXPR(_kxor_mask8(0xAA, 0x55) == 0xFF);
+
 unsigned char test_kortestz_mask8_u8(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
   // CHECK-LABEL: test_kortestz_mask8_u8
   // CHECK: [[LHS:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
@@ -195,6 +228,12 @@ __mmask8 test_kadd_mask8(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kadd_mask8(20, 30) == 50);
+TEST_CONSTEXPR(_kadd_mask8(128, 0) == 128);
+TEST_CONSTEXPR(_kadd_mask8(0xFF, 1) == 0);
+TEST_CONSTEXPR(_kadd_mask8(0xC8, 0x64) == 0x2C);
+TEST_CONSTEXPR(_kadd_mask8(0xFA, 0x0F) == 0x09);
+
 __mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kadd_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -205,6 +244,12 @@ __mmask16 test_kadd_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kadd_mask16(1000, 2000) == 3000);
+TEST_CONSTEXPR(_kadd_mask16(32768, 0) == 32768);
+TEST_CONSTEXPR(_kadd_mask16(0xFFFF, 1) == 0);
+TEST_CONSTEXPR(_kadd_mask16(0xEA60, 0x2710) == 0x1170);
+TEST_CONSTEXPR(_kadd_mask16(0xFFFA, 0x14) == 0x0E);
+
 __mmask8 test_kshiftli_mask8(__m512i A, __m512i B, __m512i C, __m512i D) {
   // CHECK-LABEL: test_kshiftli_mask8
   // CHECK: [[VAL:%.*]] = bitcast i8 %{{.*}} to <8 x i1>
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 4fcdbb29b5f9e..84eaad8d99e61 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -470,6 +470,9 @@ __mmask16 test_mm512_knot(__mmask16 a)
   return _mm512_knot(a);
 }
 
+TEST_CONSTEXPR(_mm512_knot(0) == 0xFFFF);
+TEST_CONSTEXPR(_mm512_knot(0x12345) == 0xDCBA);
+
 __m512i test_mm512_alignr_epi32(__m512i a, __m512i b)
 {
   // CHECK-LABEL: test_mm512_alignr_epi32
@@ -8578,6 +8581,12 @@ __mmask16 test_mm512_kand(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_mm512_kand(0xCC, 0xAA) == 0x88);
+TEST_CONSTEXPR(_mm512_kand(0x12345, 0xFFFF) == 0x2345);
+TEST_CONSTEXPR(_mm512_kand(0xABCD, 0x0000) == 0x0000);
+TEST_CONSTEXPR(_mm512_kand(0x5678, 0xFFFF) == 0x5678);
+TEST_CONSTEXPR(_mm512_kand(0xAAAA, 0x5555) == 0x0000);
+
 __mmask16 test_mm512_kandn(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_mm512_kandn
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8590,6 +8599,12 @@ __mmask16 test_mm512_kandn(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_mm512_kandn(0xCC, 0xAA) == 0x22);
+TEST_CONSTEXPR(_mm512_kandn(0x12345, 0xFFFF) == 0xDCBA);
+TEST_CONSTEXPR(_mm512_kandn(0x0000, 0xABCD) == 0xABCD);
+TEST_CONSTEXPR(_mm512_kandn(0xFFFF, 0x5678) == 0x0000);
+TEST_CONSTEXPR(_mm512_kandn(0xAAAA, 0xAAAA) == 0x0000);
+
 __mmask16 test_mm512_kor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_mm512_kor
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8601,6 +8616,12 @@ __mmask16 test_mm512_kor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m
                                                   __E, __F);
 }
 
+TEST_CONSTEXPR(_mm512_kor(0xC1, 0xA8) == 0xE9);
+TEST_CONSTEXPR(_mm512_kor(0x12345, 0x12345) == 0x2345);
+TEST_CONSTEXPR(_mm512_kor(0xABCD, 0x0000) == 0xABCD);
+TEST_CONSTEXPR(_mm512_kor(0xABCD, 0xFFFF) == 0xFFFF);
+TEST_CONSTEXPR(_mm512_kor(0xAAAA, 0x5555) == 0xFFFF);
+
 int test_mm512_kortestc(__m512i __A, __m512i __B, __m512i __C, __m512i __D) {
   // CHECK-LABEL: test_mm512_kortestc
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8696,6 +8717,12 @@ __mmask16 test_mm512_kxnor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_mm512_kxnor(0x00C5, 0xFFAF) == 0x95);
+TEST_CONSTEXPR(_mm512_kxnor(0x12345, 0xFFFF) == 0x2345);
+TEST_CONSTEXPR(_mm512_kxnor(0xABCD, 0x0000) == 0x5432);
+TEST_CONSTEXPR(_mm512_kxnor(0x5678, 0xFFFF) == 0x5678);
+TEST_CONSTEXPR(_mm512_kxnor(0xAAAA, 0x5555) == 0x0000);
+
 __mmask16 test_mm512_kxor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_mm512_kxor
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8707,6 +8734,12 @@ __mmask16 test_mm512_kxor(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_mm512_kxor(0xC5, 0xAF) == 0x6A);
+TEST_CONSTEXPR(_mm512_kxor(0x12345, 0xFFFF) == 0xDCBA);
+TEST_CONSTEXPR(_mm512_kxor(0xABCD, 0x0000) == 0xABCD);
+TEST_CONSTEXPR(_mm512_kxor(0x5678, 0xFFFF) == 0xA987);
+TEST_CONSTEXPR(_mm512_kxor(0xAAAA, 0x5555) == 0xFFFF);
+
 __mmask16 test_knot_mask16(__mmask16 a) {
   // CHECK-LABEL: test_knot_mask16
   // CHECK: [[IN:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8715,6 +8748,9 @@ __mmask16 test_knot_mask16(__mmask16 a) {
   return _knot_mask16(a);
 }
 
+TEST_CONSTEXPR(_knot_mask16(0) == 0xFFFF);
+TEST_CONSTEXPR(_knot_mask16(0x12345) == 0xDCBA);
+
 __mmask16 test_kand_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kand_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8726,6 +8762,12 @@ __mmask16 test_kand_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kand_mask16(0xCC, 0xAA) == 0x88);
+TEST_CONSTEXPR(_kand_mask16(0x12345, 0xFFFF) == 0x2345);
+TEST_CONSTEXPR(_kand_mask16(0xABCD, 0x0000) == 0x0000);
+TEST_CONSTEXPR(_kand_mask16(0x5678, 0xFFFF) == 0x5678);
+TEST_CONSTEXPR(_kand_mask16(0xAAAA, 0x5555) == 0x0000);
+
 __mmask16 test_kandn_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kandn_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8738,6 +8780,12 @@ __mmask16 test_kandn_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                      __E, __F);
 }
 
+TEST_CONSTEXPR(_kandn_mask16(0xCC, 0xAA) == 0x22);
+TEST_CONSTEXPR(_kandn_mask16(0x12345, 0xFFFF) == 0xDCBA);
+TEST_CONSTEXPR(_kandn_mask16(0x0000, 0xABCD) == 0xABCD);
+TEST_CONSTEXPR(_kandn_mask16(0xFFFF, 0x5678) == 0x0000);
+TEST_CONSTEXPR(_kandn_mask16(0xAAAA, 0xAAAA) == 0x0000);
+
 __mmask16 test_kor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kor_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8749,6 +8797,12 @@ __mmask16 test_kor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __
                                                    __E, __F);
 }
 
+TEST_CONSTEXPR(_kor_mask16(0xC1, 0xA8) == 0xE9);
+TEST_CONSTEXPR(_kor_mask16(0x12345, 0x12345) == 0x2345);
+TEST_CONSTEXPR(_kor_mask16(0xABCD, 0x0000) == 0xABCD);
+TEST_CONSTEXPR(_kor_mask16(0xABCD, 0xFFFF) == 0xFFFF);
+TEST_CONSTEXPR(_kor_mask16(0xAAAA, 0x5555) == 0xFFFF);
+
 __mmask16 test_kxnor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxnor_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8761,6 +8815,12 @@ __mmask16 test_kxnor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D,
                                                      __E, __F);
 }
 
+TEST_CONSTEXPR(_kxnor_mask16(0x00C5, 0xFFAF) == 0x95);
+TEST_CONSTEXPR(_kxnor_mask16(0x12345, 0xFFFF) == 0x2345);
+TEST_CONSTEXPR(_kxnor_mask16(0xABCD, 0x0000) == 0x5432);
+TEST_CONSTEXPR(_kxnor_mask16(0x5678, 0xFFFF) == 0x5678);
+TEST_CONSTEXPR(_kxnor_mask16(0xAAAA, 0x5555) == 0x0000);
+
 __mmask16 test_kxor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: test_kxor_mask16
   // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -8772,6 +8832,12 @@ __mmask16 test_kxor_mask16(__m512i __A, __m512i __B, __m512i __C, __m512i __D, _
                                                     __E, __F);
 }
 
+TEST_CONSTEXPR(_kxor_mask16(0xC5, 0xAF) == 0x6A);
+TEST_CONSTEXPR(_kxor_mask16(0x12345, 0xFFFF) == 0xDCBA);
+TEST_CONSTEXPR(_kxor_mask16(0xABCD, 0x0000) == 0xABCD);
+TEST_CONSTEXPR(_kxor_mask16(0x5678, 0xFFFF) == 0xA987);
+TEST_CONSTEXPR(_kxor_mask16(0xAAAA, 0x5555) == 0xFFFF);
+
 __mmask16 test_kshiftli_mask16(__m512i A, __m512i B, __m512i C, __m512i D) {
   // CHECK-LABEL: test_kshiftli_mask16
   // CHECK: [[VAL:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
@@ -11107,6 +11173,8 @@ __mmask16 test_mm512_int2mask(int __a)
   return _mm512_int2mask(__a);
 }
 
+TEST_CONSTEXPR(_mm512_int2mask((int)0xDEADBEEF) == 0xBEEF);
+
 int test_mm512_mask2int(__mmask16 __a)
 {
   // CHECK-LABEL: test_mm512_mask2int
@@ -11114,6 +11182,8 @@ int test_mm512_mask2int(__mmask16 __a)
   return _mm512_mask2int(__a);
 }
 
+TEST_CONSTEXPR(_mm512_mask2int(0x8000) == 0x00008000);
+
 __m128 test_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
 {
   // CHECK-LABEL: test_mm_mask_move_ss

>From b43c2740028b8d5e7aaebac55e8cccd35bcec94f Mon Sep 17 00:00:00 2001
From: Bart Chrzaszcz <bartchr at google.com>
Date: Tue, 23 Sep 2025 11:36:37 +0100
Subject: [PATCH 37/42] [mlir] Fix bazel after d8b84be #2. (#160271)

This is needed because `mlir/dialects/transform/smt.py` imports it:

```py
from .._transform_smt_extension_ops_gen import *
from .._transform_smt_extension_ops_gen import _Dialect
```
---
 .../mlir/python/BUILD.bazel                     | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
index 3daf9290921e7..1c6848f543513 100644
--- a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
@@ -1446,6 +1446,22 @@ gentbl_filegroup(
     ],
 )
 
+gentbl_filegroup(
+    name = "TransformSMTExtensionOpsPyGen",
+    tbl_outs = {"mlir/dialects/_transform_smt_extension_ops_gen.py": [
+        "-gen-python-op-bindings",
+        "-bind-dialect=transform",
+        "-dialect-extension=smt_transform",
+    ]},
+    tblgen = "//mlir:mlir-tblgen",
+    td_file = "mlir/dialects/TransformSMTExtensionOps.td",
+    deps = [
+        "//mlir:OpBaseTdFiles",
+        "//mlir:TransformDialectTdFiles",
+        "//mlir:TransformSMTExtensionOpsTdFiles",
+    ],
+)
+
 filegroup(
     name = "TransformOpsPyFiles",
     srcs = [
@@ -1460,6 +1476,7 @@ filegroup(
         ":StructuredTransformOpsPyGen",
         ":TensorTransformOpsPyGen",
         ":TransformEnumPyGen",
+        ":TransformSMTExtensionOpsPyGen",
         ":TransformOpsPyGen",
         ":VectorTransformEnumPyGen",
         ":VectorTransformOpsPyGen",

>From b092da2e62b9a12b79078c3ed0314c6e086d082e Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 23 Sep 2025 18:51:53 +0800
Subject: [PATCH 38/42] [LV] Regenerate literal struct return tests with UTC.
 NFC (#160268)

This is a precommit for an upcoming patch that fixes a crash when
replicating struct calls.
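
(Note: each regenerated test records its UTC_ARGS in a leading NOTE line, so
the checks can be refreshed later by re-running utils/update_test_checks.py
with those same arguments. A minimal sketch of such a rerun is below; the
`build/bin/opt` path and the subprocess wrapper are illustrative assumptions,
not part of this patch.)

```py
# Illustrative rerun of update_test_checks.py using the UTC_ARGS recorded in
# the NOTE lines of the two tests touched by this patch. The opt binary path
# is an assumption about the local build directory.
import subprocess

TESTS = [
    "llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll",
    "llvm/test/Transforms/LoopVectorize/struct-return.ll",
]

for test in TESTS:
    subprocess.run(
        [
            "llvm/utils/update_test_checks.py",
            "--opt-binary=build/bin/opt",
            "--check-globals", "none",
            "--filter-out-after", "^scalar.ph:",
            "--version", "6",
            test,
        ],
        check=True,
    )
```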
---
 .../AArch64/scalable-struct-return.ll         | 149 +++++--
 .../Transforms/LoopVectorize/struct-return.ll | 387 +++++++++++++++---
 2 files changed, 459 insertions(+), 77 deletions(-)

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll
index 2941b3677af81..8830ce33aecff 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 6
 ; RUN: opt < %s -mattr=+sve -passes=loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s
 
 target triple = "aarch64-unknown-linux-gnu"
@@ -5,14 +6,41 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Tests basic vectorization of scalable homogeneous struct literal returns.
 
 define void @struct_return_f32_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f32_widen
-; CHECK-SAME:  (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]])
-; CHECK:       vector.body:
-; CHECK:         [[WIDE_CALL:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @scalable_vec_masked_foo(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; CHECK:         [[WIDE_A:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[WIDE_CALL]], 0
-; CHECK:         [[WIDE_B:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[WIDE_CALL]], 1
-; CHECK:         call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[WIDE_A]], ptr {{%.*}}, i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK:         call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[WIDE_B]], ptr {{%.*}}, i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-LABEL: define void @struct_return_f32_widen(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i64 1024, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i64 1024, [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1024)
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP7]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @scalable_vec_masked_foo(<vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP8]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP8]], 1
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP9]], ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP10]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
+; CHECK-NEXT:    [[TMP14:%.*]] = xor i1 [[TMP13]], true
+; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -36,14 +64,41 @@ exit:
 }
 
 define void @struct_return_f64_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f64_widen
-; CHECK-SAME:  (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]])
-; CHECK:       vector.body:
-; CHECK:         [[WIDE_CALL:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @scalable_vec_masked_bar(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; CHECK:         [[WIDE_A:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[WIDE_CALL]], 0
-; CHECK:         [[WIDE_B:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[WIDE_CALL]], 1
-; CHECK:         call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[WIDE_A]], ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK:         call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[WIDE_B]], ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-LABEL: define void @struct_return_f64_widen(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = sub i64 1024, [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i64 1024, [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024)
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
+; CHECK-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @scalable_vec_masked_bar(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP8]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP8]], 1
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP9]], ptr [[TMP11]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP10]], ptr [[TMP12]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP6]])
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
+; CHECK-NEXT:    [[TMP14:%.*]] = xor i1 [[TMP13]], true
+; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -67,15 +122,59 @@ exit:
 }
 
 define void @struct_return_f32_widen_rt_checks(ptr %in, ptr writeonly %out_a, ptr writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks
-; CHECK-SAME:  (ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]])
-; CHECK:       entry:
-; CHECK:         br label %vector.memcheck
-; CHECK:       vector.memcheck:
-; CHECK:       vector.body:
-; CHECK:         call { <vscale x 4 x float>, <vscale x 4 x float> } @scalable_vec_masked_foo(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
-; CHECK:       for.body:
-; CHECK:         call { float, float } @foo(float [[LOAD:%.*]])
+; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks(
+; CHECK-SAME: ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[IN3:%.*]] = ptrtoint ptr [[IN]] to i64
+; CHECK-NEXT:    [[OUT_A2:%.*]] = ptrtoint ptr [[OUT_A]] to i64
+; CHECK-NEXT:    [[OUT_B1:%.*]] = ptrtoint ptr [[OUT_B]] to i64
+; CHECK-NEXT:    br label %[[VECTOR_MEMCHECK:.*]]
+; CHECK:       [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 4
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[OUT_B1]], [[OUT_A2]]
+; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP1]], 4
+; CHECK-NEXT:    [[TMP5:%.*]] = sub i64 [[OUT_A2]], [[IN3]]
+; CHECK-NEXT:    [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP1]], 4
+; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[OUT_B1]], [[IN3]]
+; CHECK-NEXT:    [[DIFF_CHECK5:%.*]] = icmp ult i64 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[CONFLICT_RDX6:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK5]]
+; CHECK-NEXT:    br i1 [[CONFLICT_RDX6]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 2
+; CHECK-NEXT:    [[TMP12:%.*]] = sub i64 1024, [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 1024, [[TMP11]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1024)
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP15]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x float> poison)
+; CHECK-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @scalable_vec_masked_foo(<vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP17:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP16]], 1
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP17]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr [[TMP20]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP14]])
+; CHECK-NEXT:    [[TMP21:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i1 [[TMP21]], true
+; CHECK-NEXT:    br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH]]:
+;
 entry:
   br label %for.body
 
diff --git a/llvm/test/Transforms/LoopVectorize/struct-return.ll b/llvm/test/Transforms/LoopVectorize/struct-return.ll
index b721e9e489804..f2e2e2846614b 100644
--- a/llvm/test/Transforms/LoopVectorize/struct-return.ll
+++ b/llvm/test/Transforms/LoopVectorize/struct-return.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 6
 ; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -S -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize 2>%t | FileCheck %s
 ; RUN: cat %t | FileCheck --check-prefix=CHECK-REMARKS %s
 
@@ -7,14 +8,30 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
 
 ; CHECK-REMARKS: remark: {{.*}} vectorized loop
 define void @struct_return_f32_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f32_widen
-; CHECK-SAME:  (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]])
-; CHECK:       vector.body:
-; CHECK:         [[WIDE_CALL:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD:%.*]])
-; CHECK:         [[WIDE_A:%.*]] = extractvalue { <2 x float>, <2 x float> } [[WIDE_CALL]], 0
-; CHECK:         [[WIDE_B:%.*]] = extractvalue { <2 x float>, <2 x float> } [[WIDE_CALL]], 1
-; CHECK:         store <2 x float> [[WIDE_A]], ptr {{%.*}}, align 4
-; CHECK:         store <2 x float> [[WIDE_B]], ptr {{%.*}}, align 4
+; CHECK-LABEL: define void @struct_return_f32_widen(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x float> [[TMP2]], ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x float> [[TMP3]], ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -39,14 +56,30 @@ exit:
 
 ; CHECK-REMARKS: remark: {{.*}} vectorized loop
 define void @struct_return_f64_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f64_widen
-; CHECK-SAME:  (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]])
-; CHECK:        vector.body:
-; CHECK:          [[WIDE_CALL:%.*]] = call { <2 x double>, <2 x double> } @fixed_vec_bar(<2 x double> [[WIDE_LOAD:%.*]])
-; CHECK:          [[WIDE_A:%.*]] = extractvalue { <2 x double>, <2 x double> } [[WIDE_CALL]], 0
-; CHECK:          [[WIDE_B:%.*]] = extractvalue { <2 x double>, <2 x double> } [[WIDE_CALL]], 1
-; CHECK:          store <2 x double> [[WIDE_A]], ptr {{%.*}}, align 8
-; CHECK:          store <2 x double> [[WIDE_B]], ptr {{%.*}}, align 8
+; CHECK-LABEL: define void @struct_return_f64_widen(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x double>, <2 x double> } @fixed_vec_bar(<2 x double> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x double> [[TMP2]], ptr [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x double> [[TMP3]], ptr [[TMP5]], align 8
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -71,14 +104,43 @@ exit:
 
 ; CHECK-REMARKS: remark: {{.*}} vectorized loop
 define void @struct_return_f32_widen_rt_checks(ptr %in, ptr writeonly %out_a, ptr writeonly %out_b) {
-; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks
-; CHECK-SAME:  (ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]])
-; CHECK:       entry:
-; CHECK:         br label %vector.memcheck
-; CHECK:       vector.memcheck:
-; CHECK:       vector.body:
-; CHECK:         call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD:%.*]])
-; CHECK:       for.body:
+; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks(
+; CHECK-SAME: ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[IN3:%.*]] = ptrtoint ptr [[IN]] to i32
+; CHECK-NEXT:    [[OUT_A2:%.*]] = ptrtoint ptr [[OUT_A]] to i32
+; CHECK-NEXT:    [[OUT_B1:%.*]] = ptrtoint ptr [[OUT_B]] to i32
+; CHECK-NEXT:    br label %[[VECTOR_MEMCHECK:.*]]
+; CHECK:       [[VECTOR_MEMCHECK]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = sub i32 [[OUT_B1]], [[OUT_A2]]
+; CHECK-NEXT:    [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP0]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[OUT_A2]], [[IN3]]
+; CHECK-NEXT:    [[DIFF_CHECK4:%.*]] = icmp ult i32 [[TMP1]], 8
+; CHECK-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 [[OUT_B1]], [[IN3]]
+; CHECK-NEXT:    [[DIFF_CHECK5:%.*]] = icmp ult i32 [[TMP2]], 8
+; CHECK-NEXT:    [[CONFLICT_RDX6:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK5]]
+; CHECK-NEXT:    br i1 [[CONFLICT_RDX6]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x float> [[TMP5]], ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x float> [[TMP6]], ptr [[TMP8]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH]]:
+;
 ; CHECK          call { float, float } @foo(float [[LOAD:%.*]])
 entry:
   br label %for.body
@@ -105,9 +167,28 @@ exit:
 ; TODO: Allow mixed-struct type vectorization and mark overflow intrinsics as trivially vectorizable.
 ; CHECK-REMARKS:         remark: {{.*}} loop not vectorized: call instruction cannot be vectorized
 define void @test_overflow_intrinsic(ptr noalias readonly %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @test_overflow_intrinsic
-; CHECK-NOT:   vector.body:
-; CHECK-NOT:   @llvm.sadd.with.overflow.v{{.+}}i32
+; CHECK-LABEL: define void @test_overflow_intrinsic(
+; CHECK-SAME: ptr noalias readonly [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[IN_VAL]], i32 [[IN_VAL]])
+; CHECK-NEXT:    [[EXTRACT_RET:%.*]] = extractvalue { i32, i1 } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[CALL]], 1
+; CHECK-NEXT:    [[ZEXT_OVERFLOW:%.*]] = zext i1 [[EXTRACT_OVERFLOW]] to i8
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store i32 [[EXTRACT_RET]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store i8 [[ZEXT_OVERFLOW]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -133,9 +214,27 @@ exit:
 
 ; CHECK-REMARKS: remark: {{.*}} vectorized loop
 define void @struct_return_i32_three_results_widen(ptr noalias %in, ptr noalias writeonly %out_a) {
-; CHECK-LABEL: define void @struct_return_i32_three_results_widen
-; CHECK:   vector.body:
-; CHECK:     call { <2 x i32>, <2 x i32>, <2 x i32> } @fixed_vec_qux(<2 x i32> [[WIDE_LOAD:%.*]])
+; CHECK-LABEL: define void @struct_return_i32_three_results_widen(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @fixed_vec_qux(<2 x i32> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[OUT_A]], i64 [[INDEX]]
+; CHECK-NEXT:    store <2 x i32> [[TMP2]], ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -159,10 +258,50 @@ exit:
 ; (mainly it does not crash).
 ; CHECK-REMARKS: remark: {{.*}} vectorized loop
 define void @scalarized_predicated_struct_return(ptr %a) {
-; CHECK-LABEL: define void @scalarized_predicated_struct_return
-; CHECK:  vector.body:
-; CHECK:  pred.store.if:
-; CHECK:     tail call { i64, i64 } @bar_i64(i64 {{.+}})
+; CHECK-LABEL: define void @scalarized_predicated_struct_return(
+; CHECK-SAME: ptr [[A:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE2:.*]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt <2 x i64> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
+; CHECK-NEXT:    br i1 [[TMP2]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK:       [[PRED_STORE_IF]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call { i64, i64 } @bar_i64(i64 [[TMP3]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { i64, i64 } [[TMP4]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = udiv i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP7]], ptr [[TMP9]], align 8
+; CHECK-NEXT:    br label %[[PRED_STORE_CONTINUE]]
+; CHECK:       [[PRED_STORE_CONTINUE]]:
+; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2]]
+; CHECK:       [[PRED_STORE_IF1]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = tail call { i64, i64 } @bar_i64(i64 [[TMP11]]) #[[ATTR4]]
+; CHECK-NEXT:    [[TMP13:%.*]] = extractvalue { i64, i64 } [[TMP12]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; CHECK-NEXT:    [[TMP15:%.*]] = udiv i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP16]]
+; CHECK-NEXT:    store i64 [[TMP15]], ptr [[TMP17]], align 8
+; CHECK-NEXT:    br label %[[PRED_STORE_CONTINUE2]]
+; CHECK:       [[PRED_STORE_CONTINUE2]]:
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br [[EXIT:label %.*]]
+; CHECK:       [[SCALAR_PH:.*:]]
+;
 entry:
   br label %for.body
 
@@ -192,8 +331,27 @@ exit:
 ; Negative test. Widening structs of vectors is not supported.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_struct_of_vectors(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_struct_of_vectors
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_struct_of_vectors(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load <1 x float>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { <1 x float>, <1 x float> } @foo(<1 x float> [[IN_VAL]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue { <1 x float>, <1 x float> } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue { <1 x float>, <1 x float> } [[CALL]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store <1 x float> [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store <1 x float> [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -219,9 +377,27 @@ exit:
 ; Negative test. Widening structs with mixed element types is not supported.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_mixed_element_type_struct_return(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_mixed_element_type_struct_return
-; CHECK-NOT:   vector.body:
-; CHECK-NOT:   call {{.*}} @fixed_vec_baz
+; CHECK-LABEL: define void @negative_mixed_element_type_struct_return(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { float, i32 } @baz(float [[IN_VAL]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue { float, i32 } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue { float, i32 } [[CALL]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store i32 [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -249,9 +425,27 @@ exit:
 ; Negative test. Widening non-literal structs is not supported.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_named_struct_return(ptr noalias readonly %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_named_struct_return
-; CHECK-NOT:   vector.body:
-; CHECK-NOT:   call {{.*}} @fixed_vec_bar
+; CHECK-LABEL: define void @negative_named_struct_return(
+; CHECK-SAME: ptr noalias readonly [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = tail call [[NAMED_STRUCT:%.*]] @[[BAR_NAMED:[a-zA-Z0-9_$\"\\.-]*[a-zA-Z_$\"\\.-][a-zA-Z0-9_$\"\\.-]*]](double [[IN_VAL]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue [[NAMED_STRUCT]] [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue [[NAMED_STRUCT]] [[CALL]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store double [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store double [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -277,8 +471,28 @@ exit:
 ; Negative test. Nested homogeneous structs are not supported.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_nested_struct(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_nested_struct
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_nested_struct(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { { float, float } } @foo_nested_struct(float [[IN_VAL]]) #[[ATTR1]]
+; CHECK-NEXT:    [[EXTRACT_INNER:%.*]] = extractvalue { { float, float } } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue { float, float } [[EXTRACT_INNER]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue { float, float } [[EXTRACT_INNER]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -305,8 +519,24 @@ exit:
 ; Negative test. The second element of the struct cannot be widened.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_non_widenable_element(ptr noalias %in, ptr noalias writeonly %out_a) {
-; CHECK-LABEL: define void @negative_non_widenable_element
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_non_widenable_element(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { float, [1 x float] } @foo_one_non_widenable_element(float [[IN_VAL]]) #[[ATTR1]]
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue { float, [1 x float] } [[CALL]], 0
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -329,8 +559,28 @@ exit:
 ; Negative test. Homogeneous structs of arrays are not supported.
 ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_struct_array_elements(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_struct_array_elements
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_struct_array_elements(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { [2 x float] } @foo_arrays(float [[IN_VAL]]) #[[ATTR1]]
+; CHECK-NEXT:    [[EXTRACT_INNER:%.*]] = extractvalue { [2 x float] } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue [2 x float] [[EXTRACT_INNER]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue [2 x float] [[EXTRACT_INNER]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -357,8 +607,26 @@ exit:
 ; Negative test. Widening struct loads is not supported.
 ; CHECK-REMARKS: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_struct_load(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
-; CHECK-LABEL: define void @negative_struct_load
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_struct_load(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds { float, float }, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[CALL:%.*]] = load { float, float }, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0
+; CHECK-NEXT:    [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]]
+; CHECK-NEXT:    store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 
@@ -383,8 +651,23 @@ exit:
 ; Negative test. Widening struct stores is not supported.
 ; CHECK-REMARKS: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized
 define void @negative_struct_return_store_struct(ptr noalias %in, ptr noalias writeonly %out) {
-; CHECK-LABEL: define void @negative_struct_return_store_struct
-; CHECK-NOT:   vector.body:
+; CHECK-LABEL: define void @negative_struct_return_store_struct(
+; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds { float, float }, ptr [[IN]], i64 [[IV]]
+; CHECK-NEXT:    [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = tail call { float, float } @foo(float [[IN_VAL]]) #[[ATTR1]]
+; CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr inbounds { float, float }, ptr [[OUT]], i64 [[IV]]
+; CHECK-NEXT:    store { float, float } [[CALL]], ptr [[OUT_PTR]], align 8
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
 entry:
   br label %for.body
 

>From 82195974cf117f77f6059b78d2ee4bd706f90cb4 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Tue, 23 Sep 2025 12:55:48 +0200
Subject: [PATCH 39/42] [clang][bytecode] Remove bogus Initializing special
 case (#159933)

This doesn't seem to be needed anymore and causes problems.

Fixes #159787
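
For reference, a minimal sketch of the pattern exercised here (it mirrors the
cxx03.cpp test added below; the original reproducer in #159787 may differ):

  typedef __attribute__((ext_vector_type(4))) int vi4b;
  struct S {
    vi4b w;
  };
  // Constant-initializing an int from a vector element of a temporary struct;
  // the member access on the temporary base goes through VisitMemberExpr.
  const int s = S().w[1];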
---
 clang/lib/AST/ByteCode/Compiler.cpp | 9 ++-------
 clang/test/AST/ByteCode/cxx03.cpp   | 6 ++++++
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 7518cfd2cf94d..b4da99957ee88 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -2383,13 +2383,8 @@ bool Compiler<Emitter>::VisitMemberExpr(const MemberExpr *E) {
     return this->visitDeclRef(Member, E);
   }
 
-  if (Initializing) {
-    if (!this->delegate(Base))
-      return false;
-  } else {
-    if (!this->visit(Base))
-      return false;
-  }
+  if (!this->visit(Base))
+    return false;
 
   // Base above gives us a pointer on the stack.
   const auto *FD = cast<FieldDecl>(Member);
diff --git a/clang/test/AST/ByteCode/cxx03.cpp b/clang/test/AST/ByteCode/cxx03.cpp
index 10e5232b9f873..58d7f3632082d 100644
--- a/clang/test/AST/ByteCode/cxx03.cpp
+++ b/clang/test/AST/ByteCode/cxx03.cpp
@@ -40,3 +40,9 @@ struct B2 : B {
 };
 _Static_assert(&(B2().a) == &p, ""); // both-error {{taking the address of a temporary object of type 'int'}} \
                                      // both-error {{not an integral constant expression}}
+
+typedef __attribute__((ext_vector_type(4))) int vi4b;
+struct S {
+  vi4b w;
+};
+const int s = S().w[1];

>From b79a8517e08c62718d575c233d7a54a7ee31a13b Mon Sep 17 00:00:00 2001
From: Andrzej Warzyński <andrzej.warzynski at arm.com>
Date: Tue, 23 Sep 2025 12:25:55 +0100
Subject: [PATCH 40/42] [mlir][test] Fix SVE e2e test (#160269)

The original RUN line assumed that we are running on a host with SVE.
That's not necessarily the case. Failing bot:
* https://lab.llvm.org/buildbot/#/builders/121/builds/1661
---
 .../Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir            | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir
index f7d79a304acb0..6192ed345debf 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir
@@ -2,7 +2,7 @@
 // DEFINE:    -transform-interpreter -test-transform-dialect-erase-schedule \
 // DEFINE:    -cse -canonicalize -test-lower-to-llvm
 // DEFINE: %{entry_point} = main
-// DEFINE: %{run} = mlir-runner -e %{entry_point} -entry-point-result=void \
+// DEFINE: %{run} = %mcr_aarch64_cmd -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve"\
 // DEFINE:    -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils
 
 // RUN: %{compile} | %{run} | FileCheck %s

>From 3b5514b6f32320a969ece0ed1c58952b71b3f737 Mon Sep 17 00:00:00 2001
From: Andrzej Warzyński <andrzej.warzynski at arm.com>
Date: Tue, 23 Sep 2025 12:26:47 +0100
Subject: [PATCH 41/42] [mlir][nfc] Update test function name (#159806)

Simply align with the existing convention and with our Testing
Guidelines.
---
 mlir/test/Dialect/Vector/canonicalize.mlir | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index 08d28be3f8f73..75c762f38432a 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -3417,10 +3417,12 @@ func.func @negative_from_elements_poison_constant_mix() -> vector<2xf32> {
 
 // -----
 
-// CHECK-LABEL: func @vector_insert_const_regression(
+// Not a DenseElementsAttr, don't fold.
+
+// CHECK-LABEL: func @negative_insert_llvm_undef(
 //       CHECK:   llvm.mlir.undef
 //       CHECK:   vector.insert
-func.func @vector_insert_const_regression(%arg0: i8) -> vector<4xi8> {
+func.func @negative_insert_llvm_undef(%arg0: i8) -> vector<4xi8> {
   %0 = llvm.mlir.undef : vector<4xi8>
   %1 = vector.insert %arg0, %0 [0] : i8 into vector<4xi8>
   return %1 : vector<4xi8>

>From 13de15a7f740b171f5c6b5c5f0543082cc6ac792 Mon Sep 17 00:00:00 2001
From: Andrzej Warzyński <andrzej.warzynski at arm.com>
Date: Tue, 23 Sep 2025 12:27:48 +0100
Subject: [PATCH 42/42] [mlir][linalg] Use ub.poison when vectorizing
 pack+unpack Ops (#159536)

This patch makes sure that in the absence of an explicit pad value in
`linalg.pack`, the vectorizer will use `ub.poison` for the corresponding
Xfer Op pad value (as opposed to e.g. `arith.constant 0`).

Also, in the case of `linalg.unpack`, use `ub.poison` for the Xfer read
operation; for unpack there is no mechanism for a user to specify a
pad/pass-thru value in the first place.
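
Roughly, for a `linalg.pack` with no padding value, the read generated by the
vectorizer now looks like the sketch below (a sketch only; %src stands for the
pack source tensor and the shapes are taken from the updated
test_vectorize_pack test):

  %c0 = arith.constant 0 : index
  // Before: the pad operand was a synthesized zero constant,
  //   %pad = arith.constant 0.000000e+00 : f32
  // After: the pad operand is poison, i.e. "no meaningful pad value".
  %pad = ub.poison : f32
  %read = vector.transfer_read %src[%c0, %c0, %c0], %pad
            {in_bounds = [true, true, true]}
            : tensor<32x8x16xf32>, vector<32x8x16xf32>

The unpack path gets the same treatment via createReadOrMaskedRead, whose pad
value parameter becomes optional; when no value is passed, the read is created
with a poison pad value.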
---
 .../mlir/Dialect/Vector/Utils/VectorUtils.h   |  3 +-
 .../Linalg/Transforms/Vectorization.cpp       | 14 ++--
 mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp | 10 +--
 .../linalg-ops-with-patterns.mlir             |  4 +-
 .../Linalg/vectorization/linalg-ops.mlir      | 66 +++++++++----------
 5 files changed, 47 insertions(+), 50 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 97163c4532378..a57aadcdcc5b0 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -227,7 +227,8 @@ bool isLinearizableVector(VectorType type);
 ///
 /// Note: all read offsets are set to 0.
 Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source,
-                             ArrayRef<int64_t> inputVectorSizes, Value padValue,
+                             ArrayRef<int64_t> inputVectorSizes,
+                             std::optional<Value> padValue = std::nullopt,
                              bool useInBoundsInsteadOfMasking = false,
                              ArrayRef<bool> inputScalableVecDims = {});
 
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 3ee6ae1029f72..15c467b21c81e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1770,12 +1770,9 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp,
   rewriter.setInsertionPoint(packOp);
 
   Location loc = packOp.getLoc();
-  auto padValue = packOp.getPaddingValue();
-  if (!padValue) {
-    padValue = arith::ConstantOp::create(
-        rewriter, loc,
-        rewriter.getZeroAttr(packOp.getSourceType().getElementType()));
-  }
+  std::optional<Value> padValue = packOp.getPaddingValue()
+                                      ? std::optional(packOp.getPaddingValue())
+                                      : std::nullopt;
 
   // If the input vector sizes are not provided, then the vector sizes are
   // determined by the result tensor shape. In case the vector sizes aren't
@@ -1936,11 +1933,8 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
   }
 
   // -- Generate the read operation --
-  auto padValue = arith::ConstantOp::create(
-      rewriter, loc,
-      rewriter.getZeroAttr(unpackOp.getSourceType().getElementType()));
   Value readResult = vector::createReadOrMaskedRead(
-      rewriter, loc, unpackOp.getSource(), readVectorSizes, padValue,
+      rewriter, loc, unpackOp.getSource(), readVectorSizes, std::nullopt,
       useInBoundsInsteadOfMasking, readScalableVectorFlags);
 
   // -- Generate the transpose operation --
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 6551a60b5812e..025ee9a04a1de 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -319,7 +319,7 @@ bool vector::isLinearizableVector(VectorType type) {
 Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
                                      Value source,
                                      ArrayRef<int64_t> inputVectorSizes,
-                                     Value padValue,
+                                     std::optional<Value> padValue,
                                      bool useInBoundsInsteadOfMasking,
                                      ArrayRef<bool> inputScalableVecDims) {
   assert(!llvm::is_contained(inputVectorSizes, ShapedType::kDynamic) &&
@@ -328,9 +328,11 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc,
   auto sourceShape = sourceShapedType.getShape();
   assert(sourceShape.size() == inputVectorSizes.size() &&
          "expected same ranks.");
-  auto vectorType = VectorType::get(inputVectorSizes, padValue.getType(),
-                                    inputScalableVecDims);
-  assert(padValue.getType() == sourceShapedType.getElementType() &&
+  auto vectorType =
+      VectorType::get(inputVectorSizes, sourceShapedType.getElementType(),
+                      inputScalableVecDims);
+  assert((!padValue.has_value() ||
+          padValue.value().getType() == sourceShapedType.getElementType()) &&
          "expected same pad element type to match source element type");
   int64_t readRank = inputVectorSizes.size();
   auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
diff --git a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
index c09046b08e898..35f520a9f22a8 100644
--- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
@@ -339,8 +339,8 @@ module attributes {transform.with_named_sequence} {
 // CHECK-LABEL:   func.func @test_vectorize_pack(
 // CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x8x16xf32>,
 // CHECK-SAME:      %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
-// CHECK:           %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK:           %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK-DAG:       %[[VAL_2:.*]] = ub.poison : f32
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK:           %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
 // CHECK:           %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
 // CHECK:           %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
diff --git a/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir
index aa86678ba405f..62bf1f55c9af2 100644
--- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir
@@ -1068,16 +1068,16 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME:      %[[DEST:.*]]: tensor<?x?xf32>,
 // CHECK-SAME:      %[[SRC:.*]]: tensor<?x?x16x2xf32>
 func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec(%dest: tensor<?x?xf32>, %src: tensor<?x?x16x2xf32>) -> tensor<?x?xf32> {
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00
-  // CHECK: %[[C01:.*]] = arith.constant 0
-  // CHECK: %[[C02:.*]] = arith.constant 0
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C01:.*]] = arith.constant 0
+  // CHECK-DAG: %[[C02:.*]] = arith.constant 0
   // CHECK: %[[DIM4:.*]] = tensor.dim %[[SRC]], %[[C02]] : tensor<?x?x16x2xf32>
   // CHECK: %[[CNST14:.*]] = arith.constant 1
   // CHECK: %[[DIM6:.*]] = tensor.dim %[[SRC]], %[[CNST14]] : tensor<?x?x16x2xf32>
   // CHECK: %[[CNST16:.*]] = arith.constant 16 : index
   // CHECK: %[[CNST2:.*]] = arith.constant 2 : index
   // CHECK: %[[MASK_READ:.*]] = vector.create_mask %[[DIM4]], %[[DIM6]], %[[CNST16]], %[[CNST2]] : vector<2x1x[16]x2xi1>
-  // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} : tensor<?x?x16x2xf32>, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32>
+  // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} %[[PAD]] {{.*}}: tensor<?x?x16x2xf32>, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32>
   // CHECK: %[[TR:.*]] = vector.transpose %[[READ]], [0, 3, 1, 2] : vector<2x1x[16]x2xf32> to vector<2x2x1x[16]xf32>
   // CHECK: %[[SC:.*]] = vector.shape_cast %[[TR]] : vector<2x2x1x[16]xf32> to vector<4x[16]xf32>
   // CHECK: %[[MASK_WRITE:.*]] = vector.create_mask {{.*}} : vector<4x[16]xi1>
@@ -1100,9 +1100,9 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME:      %[[DEST:.*]]: tensor<?x?xf32>,
 // CHECK-SAME:      %[[SRC:.*]]: tensor<?x?x?x2xf32>
 func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec_and_tile_size(%dest: tensor<?x?xf32>, %src: tensor<?x?x?x2xf32>) -> tensor<?x?xf32> {
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00
-  // CHECK: %[[C01:.*]] = arith.constant 0
-  // CHECK: %[[C02:.*]] = arith.constant 0
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C01:.*]] = arith.constant 0
+  // CHECK-DAG: %[[C02:.*]] = arith.constant 0
   // CHECK: %[[DIM4:.*]] = tensor.dim %[[SRC]], %[[C02]] : tensor<?x?x?x2xf32>
   // CHECK: %[[C1_2:.*]] = arith.constant 1
   // CHECK: %[[DIM6:.*]] = tensor.dim %[[SRC]], %[[C1_2]] : tensor<?x?x?x2xf32>
@@ -1110,7 +1110,7 @@ func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec_and_tile_size(%dest
   // CHECK: %[[DIM_2:.*]] = tensor.dim %[[SRC]], %[[C2]] : tensor<?x?x?x2xf32>
   // CHECK: %[[C2_1:.*]] = arith.constant 2 : index
   // CHECK: %[[MASK_READ:.*]] = vector.create_mask %[[DIM4]], %[[DIM6]], %[[DIM_2]], %[[C2_1]] : vector<2x1x[16]x2xi1>
-  // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} : tensor<?x?x?x2xf32>, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32>
+  // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} %[[PAD]] {{.*}}: tensor<?x?x?x2xf32>, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32>
   // CHECK: %[[TR:.*]] = vector.transpose %[[READ]], [0, 3, 1, 2] : vector<2x1x[16]x2xf32> to vector<2x2x1x[16]xf32>
   // CHECK: %[[SC:.*]] = vector.shape_cast %[[TR]] : vector<2x2x1x[16]xf32> to vector<4x[16]xf32>
   // CHECK: %[[MASK_WRITE:.*]] = vector.create_mask {{.*}} : vector<4x[16]xi1>
@@ -1138,14 +1138,14 @@ module attributes {transform.with_named_sequence} {
 // CHECK-SAME:      %[[SRC:.*]]: tensor<8x8x32x16xf32>
 // CHECK-SAME:      %[[DEST:.*]]: tensor<256x128xf32>
 func.func @test_vectorize_unpack(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> {
-    // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-    // CHECK: %[[C0:.*]]= arith.constant 0 : index
-    // CHECK: %[[C8:.*]] = arith.constant 8 : index
-    // CHECK: %[[C80:.*]] = arith.constant 8 : index
-    // CHECK: %[[C32:.*]] = arith.constant 32 : index
-    // CHECK: %[[C16:.*]] = arith.constant 16 : index
+    // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+    // CHECK-DAG: %[[C0:.*]]= arith.constant 0 : index
+    // CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index
+    // CHECK-DAG: %[[C80:.*]] = arith.constant 8 : index
+    // CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
+    // CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
     // CHECK: %[[MSK0:.*]] = vector.create_mask %[[C8]], %[[C80]], %[[C32]], %[[C16]] : vector<16x8x32x16xi1>
-    // CHECK: %[[READ0:.*]] = vector.mask %[[MSK0]] { vector.transfer_read %[[SRC]]{{.*}}} : vector<16x8x32x16xi1> -> vector<16x8x32x16xf32>
+    // CHECK: %[[READ0:.*]] = vector.mask %[[MSK0]] { vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : vector<16x8x32x16xi1> -> vector<16x8x32x16xf32>
     // CHECK: %[[TRANSP0:.*]] = vector.transpose %[[READ0]], [0, 2, 1, 3] : vector<16x8x32x16xf32> to vector<16x32x8x16xf32>
     // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP0]] : vector<16x32x8x16xf32> to vector<512x128xf32>
     // CHECK: %[[C01:.*]] = arith.constant 0 : index
@@ -1171,9 +1171,9 @@ func.func @test_vectorize_unpack(%source: tensor<8x8x32x16xf32>, %dest: tensor<2
 // CHECK-SAME:      %[[SRC:.*]]: tensor<8x8x32x16xf32>
 // CHECK-SAME:      %[[DEST:.*]]: tensor<256x128xf32>
 func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> {
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-  // CHECK: %[[C0:.*]] = arith.constant 0 : index
-  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
   // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [0, 2, 1, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32>
   // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32>
   // CHECK: %[[C00:.*]] = arith.constant 0 : index
@@ -1196,9 +1196,9 @@ func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest:
 // CHECK-SAME:      %[[SRC:.*]]: tensor<8x8x32x16xf32>
 // CHECK-SAME:      %[[DEST:.*]]: tensor<256x128xf32>
   func.func @test_vectorize_unpack_with_outer_perm(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> {
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-  // CHECK: %[[C0:.*]] = arith.constant 0 : index
-  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
   // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 2, 0, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32>
   // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32>
   // CHECK: %[[C00:.*]] = arith.constant 0 : index
@@ -1221,9 +1221,9 @@ func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest:
 // CHECK-SAME:      %[[SRC:.*]]: tensor<8x8x32x16xf32>
 // CHECK-SAME:      %[[DEST:.*]]: tensor<256x128xf32>
 func.func @test_vectorize_unpack_no_vector_sizes(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> {
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-  // CHECK: %[[C0:.*]] = arith.constant 0 : index
-  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> 
   // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [0, 2, 1, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32>
   // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32>
   // CHECK: %[[C00:.*]] = arith.constant 0 : index
@@ -1246,9 +1246,9 @@ func.func @test_vectorize_unpack_no_vector_sizes(%source: tensor<8x8x32x16xf32>,
 // CHECK-SAME:      %[[SRC:.*]]: tensor<8x4x16x16xf32>
 // CHECK-SAME:      %[[DEST:.*]]: tensor<64x127xf32>
 func.func @test_vectorize_unpack_no_vector_sizes_slice_output(%source: tensor<8x4x16x16xf32>, %dest: tensor<64x127xf32>) -> tensor<64x127xf32> {
-  //      CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-  //      CHECK: %[[C0:.*]] = arith.constant 0 : index
-  //      CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x4x16x16xf32>, vector<8x4x16x16xf32>
+  //  CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  //  CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+  //      CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x4x16x16xf32>, vector<8x4x16x16xf32>
   //      CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 2, 0, 3] : vector<8x4x16x16xf32> to vector<4x16x8x16xf32>
   //      CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<4x16x8x16xf32> to vector<64x128xf32>
   //      CHECK: %[[C00:.*]] = arith.constant 0 : index
@@ -1275,9 +1275,9 @@ func.func @test_vectorize_unpack_no_vector_sizes_permute(%source: tensor<4x7x4xf
    %0 = linalg.unpack %source outer_dims_perm=[1, 0] inner_dims_pos = [1] inner_tiles = [4] into %dest : tensor<4x7x4xf32> -> tensor<7x16xf32>
    return %0 : tensor<7x16xf32>
  }
-  // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
-  // CHECK: %[[C0:.*]] = arith.constant 0 : index
-  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<4x7x4xf32>, vector<4x7x4xf32>
+  // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32
+  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+  // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<4x7x4xf32>, vector<4x7x4xf32>
   // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 0, 2] : vector<4x7x4xf32> to vector<7x4x4xf32>
   // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<7x4x4xf32> to vector<7x16xf32>
   // CHECK: %[[C00:.*]] = arith.constant 0 : index
@@ -1308,7 +1308,7 @@ func.func @test_vectorize_pack(%src: tensor<32x8x16xf32>, %dest: tensor<4x1x32x1
   %pack = linalg.pack %src outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %dest : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
   return %pack : tensor<4x1x32x16x2xf32>
 }
-//  CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
+//  CHECK-DAG: %[[CST:.*]] = ub.poison : f32
 //  CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //      CHECK: %[[READ:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %[[CST]]
 // CHECK-SAME:    {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
@@ -1376,7 +1376,7 @@ func.func @test_vectorize_dynamic_pack(%src: tensor<?x?xf32>, %dest: tensor<?x?x
   return %pack : tensor<?x?x16x2xf32>
 }
 
-//  CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
+//  CHECK-DAG: %[[CST:.*]] = ub.poison : f32
 //  CHECK-DAG: %[[C0_1:.*]] = arith.constant 0 : index
 //  CHECK-DAG: %[[C0_0:.*]] = arith.constant 0 : index
 //  CHECK-DAG: %[[C1_0:.*]] = arith.constant 1 : index
@@ -1417,7 +1417,7 @@ func.func @test_vectorize_pack_no_vector_sizes(%src: tensor<64x4xf32>, %dest: te
   %pack = linalg.pack %src outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %dest : tensor<64x4xf32> -> tensor<2x4x16x2xf32>
   return %pack : tensor<2x4x16x2xf32>
 }
-//  CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
+//  CHECK-DAG: %[[CST:.*]] = ub.poison : f32
 //  CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //      CHECK: %[[READ:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CST]]
 // CHECK-SAME:    {in_bounds = [true, true]} : tensor<64x4xf32>, vector<64x4xf32>


