[mlir] [llvm] [mlir] Lower math dialect later in gpu-lower-to-nvvm-pipeline (PR #78556)

Guray Ozen via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 18 01:49:59 PST 2024


https://github.com/grypp updated https://github.com/llvm/llvm-project/pull/78556

>From 949ed715c3ce6da12f4559b9e8206bbc743c2a9d Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Thu, 18 Jan 2024 10:48:24 +0100
Subject: [PATCH 1/3] [mlir] Lower math dialect later in
 gpu-lower-to-nvvm-pipeline

This PR moves the lowering of the math dialect to a later point in the pipeline. The math dialect is lowered correctly for the GPU target by `createConvertGpuOpsToNVVMOps`, so that pass needs to run first.
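
As a rough sketch of the intent (the `__nv_expf` libdevice call below is assumed from the usual GPU-to-NVVM lowering and is not part of this patch): inside a `gpu.module`, `createConvertGpuOpsToNVVMOps` is expected to rewrite math ops into target-suitable calls, e.g.

    // Device code before convert-gpu-to-nvvm:
    %e0 = math.exp %x : f32
    // After convert-gpu-to-nvvm (assumed libdevice lowering):
    %e1 = llvm.call @__nv_expf(%x) : (f32) -> f32
    // Whereas convert-math-to-llvm would instead produce:
    %e2 = llvm.intr.exp(%x) : (f32) -> f32

If `createConvertMathToLLVMPass` runs first, device-side math ops become generic LLVM intrinsics before the GPU lowering can pick them up. With this change, `convert-math-to-llvm` runs after `createGpuModuleToBinaryPass`, so it only touches host-side math ops.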
---
 .../GPU/Pipelines/GPUToNVVMPipeline.cpp        |  2 +-
 mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir  | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)
 create mode 100644 mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir

diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
index 0b4739214bf2f19..935f0deaf9c8a69 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
@@ -51,7 +51,6 @@ void buildCommonPassPipeline(
   pm.addPass(createConvertVectorToSCFPass());
   pm.addPass(createConvertSCFToCFPass());
   pm.addPass(createConvertNVVMToLLVMPass());
-  pm.addPass(createConvertMathToLLVMPass());
   pm.addPass(createConvertFuncToLLVMPass());
   pm.addPass(memref::createExpandStridedMetadataPass());
 
@@ -98,6 +97,7 @@ void buildHostPostPipeline(OpPassManager &pm,
   GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
   gpuModuleToBinaryPassOptions.compilationTarget = options.cubinFormat;
   pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
+  pm.addPass(createConvertMathToLLVMPass());
   pm.addPass(createCanonicalizerPass());
   pm.addPass(createCSEPass());
   pm.addPass(createReconcileUnrealizedCastsPass());
diff --git a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
new file mode 100644
index 000000000000000..7c55059f4a84dc8
--- /dev/null
+++ b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
@@ -0,0 +1,18 @@
+// RUN: mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=isa" -split-input-file %s | FileCheck %s
+
+// CHECK-LABEL: llvm.func @test_math(%arg0: f32) {
+func.func @test_math(%arg0 : f32) {
+    %c2 = arith.constant 2 : index
+    %c1 = arith.constant 1 : index
+    // CHECK: gpu.launch_func  @test_math_kernel::@test_math_kernel
+    // CHECK: gpu.binary @test_math_kernel  [#gpu.object<#nvvm.target
+    gpu.launch 
+        blocks(%0, %1, %2) in (%3 = %c1, %4 = %c1, %5 = %c1) 
+        threads(%6, %7, %8) in (%9 = %c2, %10 = %c1, %11 = %c1) { 
+         %s1 = math.exp %arg0 : f32
+        gpu.printf "%f" %s1 : f32
+        gpu.printf "Hello from %d\n" %6 : index
+        gpu.terminator
+    }
+    return
+}
\ No newline at end of file

>From 2e5f38356542933e684e9befe6ed395fbc8a84f3 Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Thu, 18 Jan 2024 10:49:05 +0100
Subject: [PATCH 2/3] add nl

---
 stmatrix/everything-casted.mlir | 1267 +++++++++++++++++++++++++++++++
 1 file changed, 1267 insertions(+)
 create mode 100644 stmatrix/everything-casted.mlir

diff --git a/stmatrix/everything-casted.mlir b/stmatrix/everything-casted.mlir
new file mode 100644
index 000000000000000..28ebad10c6a3ceb
--- /dev/null
+++ b/stmatrix/everything-casted.mlir
@@ -0,0 +1,1267 @@
+// -----// IR Dump After ConvertNVGPUToNVVMPass (convert-nvgpu-to-nvvm) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = builtin.unrealized_conversion_cast %arg0 : memref<64x32xf16, 3> to !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %1 = llvm.mlir.constant(0 : i32) : i32
+      %2 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %3 = llvm.extractvalue %2[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %4 = llvm.insertvalue %1, %3[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %6 = llvm.insertvalue %1, %5[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %7 = llvm.insertvalue %1, %6[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %8 = llvm.insertvalue %1, %7[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %9 = llvm.insertvalue %1, %8[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %1, %9[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %1, %10[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %11, %2[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %13 = builtin.unrealized_conversion_cast %arg1 : i64 to !nvgpu.warpgroup.descriptor<tensor = memref<64x16xf16, 3>>
+      %14 = builtin.unrealized_conversion_cast %arg2 : i64 to !nvgpu.warpgroup.descriptor<tensor = memref<16x32xf16, 3>>
+      nvvm.wgmma.fence.aligned
+      %15 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %16 = llvm.extractvalue %12[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %17 = nvvm.wgmma.mma_async %arg1, %arg2, %16, <m = 64, n = 32, k = 16>, D[<f16>, <one>, <wrapped>], A[<f16>, <one>, <row>], B[<f16>, <one>, <row>] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      %18 = llvm.insertvalue %17, %15[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      nvvm.wgmma.commit.group.sync.aligned
+      nvvm.wgmma.wait.group.sync.aligned 1
+      %19 = llvm.extractvalue %18[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %20 = llvm.mlir.constant(1 : i32) : i32
+      %21 = llvm.mlir.constant(2 : i32) : i32
+      %22 = llvm.mlir.constant(4 : i32) : i32
+      %23 = llvm.mlir.constant(8 : i32) : i32
+      %24 = llvm.mlir.constant(16 : i32) : i32
+      %25 = llvm.mlir.constant(32 : i32) : i32
+      %26 = nvvm.read.ptx.sreg.tid.x : i32
+      %27 = llvm.urem %26, %25  : i32
+      %28 = llvm.udiv %26, %25  : i32
+      %29 = llvm.udiv %27, %22  : i32
+      %30 = llvm.urem %27, %22  : i32
+      %31 = llvm.mul %30, %21  : i32
+      %32 = llvm.mul %28, %24  : i32
+      %33 = llvm.add %29, %32  : i32
+      %34 = llvm.mlir.constant(0 : i32) : i32
+      %35 = llvm.mul %34, %23  : i32
+      %36 = llvm.add %33, %35  : i32
+      %37 = llvm.mlir.constant(0 : i32) : i32
+      %38 = llvm.mul %37, %23  : i32
+      %39 = llvm.add %31, %38  : i32
+      %40 = arith.index_cast %36 : i32 to index
+      %41 = arith.index_cast %39 : i32 to index
+      %42 = llvm.extractvalue %19[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %43 = llvm.bitcast %42 : i32 to vector<2xf16>
+      vector.store %43, %arg0[%40, %41] : memref<64x32xf16, 3>, vector<2xf16>
+      %44 = llvm.mlir.constant(1 : i32) : i32
+      %45 = llvm.mul %44, %23  : i32
+      %46 = llvm.add %33, %45  : i32
+      %47 = llvm.mlir.constant(0 : i32) : i32
+      %48 = llvm.mul %47, %23  : i32
+      %49 = llvm.add %31, %48  : i32
+      %50 = arith.index_cast %46 : i32 to index
+      %51 = arith.index_cast %49 : i32 to index
+      %52 = llvm.extractvalue %19[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %53 = llvm.bitcast %52 : i32 to vector<2xf16>
+      vector.store %53, %arg0[%50, %51] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After GpuKernelOutlining (gpu-kernel-outlining) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = builtin.unrealized_conversion_cast %arg0 : memref<64x32xf16, 3> to !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %1 = llvm.mlir.constant(0 : i32) : i32
+      %2 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %3 = llvm.extractvalue %2[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %4 = llvm.insertvalue %1, %3[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %6 = llvm.insertvalue %1, %5[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %7 = llvm.insertvalue %1, %6[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %8 = llvm.insertvalue %1, %7[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %9 = llvm.insertvalue %1, %8[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %1, %9[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %1, %10[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %11, %2[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %13 = builtin.unrealized_conversion_cast %arg1 : i64 to !nvgpu.warpgroup.descriptor<tensor = memref<64x16xf16, 3>>
+      %14 = builtin.unrealized_conversion_cast %arg2 : i64 to !nvgpu.warpgroup.descriptor<tensor = memref<16x32xf16, 3>>
+      nvvm.wgmma.fence.aligned
+      %15 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %16 = llvm.extractvalue %12[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %17 = nvvm.wgmma.mma_async %arg1, %arg2, %16, <m = 64, n = 32, k = 16>, D[<f16>, <one>, <wrapped>], A[<f16>, <one>, <row>], B[<f16>, <one>, <row>] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      %18 = llvm.insertvalue %17, %15[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      nvvm.wgmma.commit.group.sync.aligned
+      nvvm.wgmma.wait.group.sync.aligned 1
+      %19 = llvm.extractvalue %18[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %20 = llvm.mlir.constant(1 : i32) : i32
+      %21 = llvm.mlir.constant(2 : i32) : i32
+      %22 = llvm.mlir.constant(4 : i32) : i32
+      %23 = llvm.mlir.constant(8 : i32) : i32
+      %24 = llvm.mlir.constant(16 : i32) : i32
+      %25 = llvm.mlir.constant(32 : i32) : i32
+      %26 = nvvm.read.ptx.sreg.tid.x : i32
+      %27 = llvm.urem %26, %25  : i32
+      %28 = llvm.udiv %26, %25  : i32
+      %29 = llvm.udiv %27, %22  : i32
+      %30 = llvm.urem %27, %22  : i32
+      %31 = llvm.mul %30, %21  : i32
+      %32 = llvm.mul %28, %24  : i32
+      %33 = llvm.add %29, %32  : i32
+      %34 = llvm.mlir.constant(0 : i32) : i32
+      %35 = llvm.mul %34, %23  : i32
+      %36 = llvm.add %33, %35  : i32
+      %37 = llvm.mlir.constant(0 : i32) : i32
+      %38 = llvm.mul %37, %23  : i32
+      %39 = llvm.add %31, %38  : i32
+      %40 = arith.index_cast %36 : i32 to index
+      %41 = arith.index_cast %39 : i32 to index
+      %42 = llvm.extractvalue %19[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %43 = llvm.bitcast %42 : i32 to vector<2xf16>
+      vector.store %43, %arg0[%40, %41] : memref<64x32xf16, 3>, vector<2xf16>
+      %44 = llvm.mlir.constant(1 : i32) : i32
+      %45 = llvm.mul %44, %23  : i32
+      %46 = llvm.add %33, %45  : i32
+      %47 = llvm.mlir.constant(0 : i32) : i32
+      %48 = llvm.mul %47, %23  : i32
+      %49 = llvm.add %31, %48  : i32
+      %50 = arith.index_cast %46 : i32 to index
+      %51 = arith.index_cast %49 : i32 to index
+      %52 = llvm.extractvalue %19[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %53 = llvm.bitcast %52 : i32 to vector<2xf16>
+      vector.store %53, %arg0[%50, %51] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertVectorToSCF (convert-vector-to-scf) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %7 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %8 = llvm.extractvalue %7[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %9 = llvm.insertvalue %6, %8[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %6, %9[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %6, %10[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %6, %11[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %13 = llvm.insertvalue %6, %12[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %14 = llvm.insertvalue %6, %13[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %15 = llvm.insertvalue %6, %14[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %16 = llvm.insertvalue %6, %15[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      nvvm.wgmma.fence.aligned
+      %17 = nvvm.wgmma.mma_async %arg1, %arg2, %16, <m = 64, n = 32, k = 16>, D[<f16>, <one>, <wrapped>], A[<f16>, <one>, <row>], B[<f16>, <one>, <row>] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      nvvm.wgmma.commit.group.sync.aligned
+      nvvm.wgmma.wait.group.sync.aligned 1
+      %18 = nvvm.read.ptx.sreg.tid.x : i32
+      %19 = llvm.urem %18, %0  : i32
+      %20 = llvm.udiv %18, %0  : i32
+      %21 = llvm.udiv %19, %3  : i32
+      %22 = llvm.urem %19, %3  : i32
+      %23 = llvm.mul %22, %4  : i32
+      %24 = llvm.mul %20, %1  : i32
+      %25 = llvm.add %21, %24  : i32
+      %26 = llvm.mul %6, %2  : i32
+      %27 = llvm.add %25, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %23, %28  : i32
+      %30 = arith.index_cast %27 : i32 to index
+      %31 = arith.index_cast %29 : i32 to index
+      %32 = llvm.extractvalue %17[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %33 = llvm.bitcast %32 : i32 to vector<2xf16>
+      vector.store %33, %arg0[%30, %31] : memref<64x32xf16, 3>, vector<2xf16>
+      %34 = llvm.mul %5, %2  : i32
+      %35 = llvm.add %25, %34  : i32
+      %36 = llvm.mul %6, %2  : i32
+      %37 = llvm.add %23, %36  : i32
+      %38 = arith.index_cast %35 : i32 to index
+      %39 = arith.index_cast %37 : i32 to index
+      %40 = llvm.extractvalue %17[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %41 = llvm.bitcast %40 : i32 to vector<2xf16>
+      vector.store %41, %arg0[%38, %39] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After SCFToControlFlow (convert-scf-to-cf) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %7 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %8 = llvm.extractvalue %7[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %9 = llvm.insertvalue %6, %8[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %6, %9[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %6, %10[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %6, %11[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %13 = llvm.insertvalue %6, %12[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %14 = llvm.insertvalue %6, %13[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %15 = llvm.insertvalue %6, %14[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %16 = llvm.insertvalue %6, %15[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      nvvm.wgmma.fence.aligned
+      %17 = nvvm.wgmma.mma_async %arg1, %arg2, %16, <m = 64, n = 32, k = 16>, D[<f16>, <one>, <wrapped>], A[<f16>, <one>, <row>], B[<f16>, <one>, <row>] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      nvvm.wgmma.commit.group.sync.aligned
+      nvvm.wgmma.wait.group.sync.aligned 1
+      %18 = nvvm.read.ptx.sreg.tid.x : i32
+      %19 = llvm.urem %18, %0  : i32
+      %20 = llvm.udiv %18, %0  : i32
+      %21 = llvm.udiv %19, %3  : i32
+      %22 = llvm.urem %19, %3  : i32
+      %23 = llvm.mul %22, %4  : i32
+      %24 = llvm.mul %20, %1  : i32
+      %25 = llvm.add %21, %24  : i32
+      %26 = llvm.mul %6, %2  : i32
+      %27 = llvm.add %25, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %23, %28  : i32
+      %30 = arith.index_cast %27 : i32 to index
+      %31 = arith.index_cast %29 : i32 to index
+      %32 = llvm.extractvalue %17[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %33 = llvm.bitcast %32 : i32 to vector<2xf16>
+      vector.store %33, %arg0[%30, %31] : memref<64x32xf16, 3>, vector<2xf16>
+      %34 = llvm.mul %5, %2  : i32
+      %35 = llvm.add %25, %34  : i32
+      %36 = llvm.mul %6, %2  : i32
+      %37 = llvm.add %23, %36  : i32
+      %38 = arith.index_cast %35 : i32 to index
+      %39 = arith.index_cast %37 : i32 to index
+      %40 = llvm.extractvalue %17[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %41 = llvm.bitcast %40 : i32 to vector<2xf16>
+      vector.store %41, %arg0[%38, %39] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertNVVMToLLVMPass (convert-nvvm-to-llvm) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %7 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %8 = llvm.extractvalue %7[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %9 = llvm.insertvalue %6, %8[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %6, %9[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %6, %10[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %6, %11[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %13 = llvm.insertvalue %6, %12[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %14 = llvm.insertvalue %6, %13[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %15 = llvm.insertvalue %6, %14[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %16 = llvm.insertvalue %6, %15[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %17 = llvm.mlir.constant(1 : i32) : i32
+      %18 = llvm.mlir.constant(1 : i32) : i32
+      %19 = llvm.mlir.constant(1 : i32) : i32
+      %20 = llvm.mlir.constant(0 : i32) : i32
+      %21 = llvm.mlir.constant(1 : i32) : i32
+      %22 = llvm.extractvalue %16[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %23 = llvm.extractvalue %16[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %24 = llvm.extractvalue %16[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.extractvalue %16[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %26 = llvm.extractvalue %16[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %27 = llvm.extractvalue %16[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %28 = llvm.extractvalue %16[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %29 = llvm.extractvalue %16[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %30 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %22, %23, %24, %25, %26, %27, %28, %29, %arg1, %arg2, %17, %18, %19, %20, %21 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      %31 = llvm.mlir.constant(1 : i32) : i32
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %31 : (i32) -> ()
+      %32 = nvvm.read.ptx.sreg.tid.x : i32
+      %33 = llvm.urem %32, %0  : i32
+      %34 = llvm.udiv %32, %0  : i32
+      %35 = llvm.udiv %33, %3  : i32
+      %36 = llvm.urem %33, %3  : i32
+      %37 = llvm.mul %36, %4  : i32
+      %38 = llvm.mul %34, %1  : i32
+      %39 = llvm.add %35, %38  : i32
+      %40 = llvm.mul %6, %2  : i32
+      %41 = llvm.add %39, %40  : i32
+      %42 = llvm.mul %6, %2  : i32
+      %43 = llvm.add %37, %42  : i32
+      %44 = arith.index_cast %41 : i32 to index
+      %45 = arith.index_cast %43 : i32 to index
+      %46 = llvm.extractvalue %30[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %47 = llvm.bitcast %46 : i32 to vector<2xf16>
+      vector.store %47, %arg0[%44, %45] : memref<64x32xf16, 3>, vector<2xf16>
+      %48 = llvm.mul %5, %2  : i32
+      %49 = llvm.add %39, %48  : i32
+      %50 = llvm.mul %6, %2  : i32
+      %51 = llvm.add %37, %50  : i32
+      %52 = arith.index_cast %49 : i32 to index
+      %53 = arith.index_cast %51 : i32 to index
+      %54 = llvm.extractvalue %30[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %55 = llvm.bitcast %54 : i32 to vector<2xf16>
+      vector.store %55, %arg0[%52, %53] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertMathToLLVMPass (convert-math-to-llvm) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %7 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %8 = llvm.extractvalue %7[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %9 = llvm.insertvalue %6, %8[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %6, %9[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %6, %10[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %6, %11[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %13 = llvm.insertvalue %6, %12[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %14 = llvm.insertvalue %6, %13[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %15 = llvm.insertvalue %6, %14[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %16 = llvm.insertvalue %6, %15[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %17 = llvm.mlir.constant(1 : i32) : i32
+      %18 = llvm.mlir.constant(1 : i32) : i32
+      %19 = llvm.mlir.constant(1 : i32) : i32
+      %20 = llvm.mlir.constant(0 : i32) : i32
+      %21 = llvm.mlir.constant(1 : i32) : i32
+      %22 = llvm.extractvalue %16[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %23 = llvm.extractvalue %16[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %24 = llvm.extractvalue %16[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.extractvalue %16[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %26 = llvm.extractvalue %16[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %27 = llvm.extractvalue %16[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %28 = llvm.extractvalue %16[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %29 = llvm.extractvalue %16[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %30 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %22, %23, %24, %25, %26, %27, %28, %29, %arg1, %arg2, %17, %18, %19, %20, %21 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      %31 = llvm.mlir.constant(1 : i32) : i32
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %31 : (i32) -> ()
+      %32 = nvvm.read.ptx.sreg.tid.x : i32
+      %33 = llvm.urem %32, %0  : i32
+      %34 = llvm.udiv %32, %0  : i32
+      %35 = llvm.udiv %33, %3  : i32
+      %36 = llvm.urem %33, %3  : i32
+      %37 = llvm.mul %36, %4  : i32
+      %38 = llvm.mul %34, %1  : i32
+      %39 = llvm.add %35, %38  : i32
+      %40 = llvm.mul %6, %2  : i32
+      %41 = llvm.add %39, %40  : i32
+      %42 = llvm.mul %6, %2  : i32
+      %43 = llvm.add %37, %42  : i32
+      %44 = arith.index_cast %41 : i32 to index
+      %45 = arith.index_cast %43 : i32 to index
+      %46 = llvm.extractvalue %30[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %47 = llvm.bitcast %46 : i32 to vector<2xf16>
+      vector.store %47, %arg0[%44, %45] : memref<64x32xf16, 3>, vector<2xf16>
+      %48 = llvm.mul %5, %2  : i32
+      %49 = llvm.add %39, %48  : i32
+      %50 = llvm.mul %6, %2  : i32
+      %51 = llvm.add %37, %50  : i32
+      %52 = arith.index_cast %49 : i32 to index
+      %53 = arith.index_cast %51 : i32 to index
+      %54 = llvm.extractvalue %30[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %55 = llvm.bitcast %54 : i32 to vector<2xf16>
+      vector.store %55, %arg0[%52, %53] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertFuncToLLVMPass (convert-func-to-llvm) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      %7 = llvm.mlir.undef : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)>
+      %8 = llvm.extractvalue %7[0] : !llvm.struct<(struct<(i32, i32, i32, i32, i32, i32, i32, i32)>)> 
+      %9 = llvm.insertvalue %6, %8[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %10 = llvm.insertvalue %6, %9[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %11 = llvm.insertvalue %6, %10[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %12 = llvm.insertvalue %6, %11[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %13 = llvm.insertvalue %6, %12[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %14 = llvm.insertvalue %6, %13[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %15 = llvm.insertvalue %6, %14[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %16 = llvm.insertvalue %6, %15[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %17 = llvm.mlir.constant(1 : i32) : i32
+      %18 = llvm.mlir.constant(1 : i32) : i32
+      %19 = llvm.mlir.constant(1 : i32) : i32
+      %20 = llvm.mlir.constant(0 : i32) : i32
+      %21 = llvm.mlir.constant(1 : i32) : i32
+      %22 = llvm.extractvalue %16[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %23 = llvm.extractvalue %16[1] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %24 = llvm.extractvalue %16[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.extractvalue %16[3] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %26 = llvm.extractvalue %16[4] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %27 = llvm.extractvalue %16[5] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %28 = llvm.extractvalue %16[6] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %29 = llvm.extractvalue %16[7] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %30 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %22, %23, %24, %25, %26, %27, %28, %29, %arg1, %arg2, %17, %18, %19, %20, %21 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      %31 = llvm.mlir.constant(1 : i32) : i32
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %31 : (i32) -> ()
+      %32 = nvvm.read.ptx.sreg.tid.x : i32
+      %33 = llvm.urem %32, %0  : i32
+      %34 = llvm.udiv %32, %0  : i32
+      %35 = llvm.udiv %33, %3  : i32
+      %36 = llvm.urem %33, %3  : i32
+      %37 = llvm.mul %36, %4  : i32
+      %38 = llvm.mul %34, %1  : i32
+      %39 = llvm.add %35, %38  : i32
+      %40 = llvm.mul %6, %2  : i32
+      %41 = llvm.add %39, %40  : i32
+      %42 = llvm.mul %6, %2  : i32
+      %43 = llvm.add %37, %42  : i32
+      %44 = llvm.sext %41 : i32 to i64
+      %45 = builtin.unrealized_conversion_cast %44 : i64 to index
+      %46 = llvm.sext %43 : i32 to i64
+      %47 = builtin.unrealized_conversion_cast %46 : i64 to index
+      %48 = llvm.extractvalue %30[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %49 = llvm.bitcast %48 : i32 to vector<2xf16>
+      vector.store %49, %arg0[%45, %47] : memref<64x32xf16, 3>, vector<2xf16>
+      %50 = llvm.mul %5, %2  : i32
+      %51 = llvm.add %39, %50  : i32
+      %52 = llvm.mul %6, %2  : i32
+      %53 = llvm.add %37, %52  : i32
+      %54 = llvm.sext %51 : i32 to i64
+      %55 = builtin.unrealized_conversion_cast %54 : i64 to index
+      %56 = llvm.sext %53 : i32 to i64
+      %57 = builtin.unrealized_conversion_cast %56 : i64 to index
+      %58 = llvm.extractvalue %30[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %59 = llvm.bitcast %58 : i32 to vector<2xf16>
+      vector.store %59, %arg0[%55, %57] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ExpandStridedMetadata (expand-strided-metadata) //----- //
+module {
+  gpu.module @asd {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After GpuNVVMAttachTarget (nvvm-attach-target) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertAffineToStandard (lower-affine) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ArithToLLVMConversionPass (convert-arith-to-llvm) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After ConvertIndexToLLVMPass (convert-index-to-llvm) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After Canonicalizer (canonicalize) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.mul %6, %2  : i32
+      %19 = llvm.add %13, %18  : i32
+      %20 = llvm.sext %17 : i32 to i64
+      %21 = builtin.unrealized_conversion_cast %20 : i64 to index
+      %22 = llvm.sext %19 : i32 to i64
+      %23 = builtin.unrealized_conversion_cast %22 : i64 to index
+      %24 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %25 = llvm.bitcast %24 : i32 to vector<2xf16>
+      vector.store %25, %arg0[%21, %23] : memref<64x32xf16, 3>, vector<2xf16>
+      %26 = llvm.mul %5, %2  : i32
+      %27 = llvm.add %15, %26  : i32
+      %28 = llvm.mul %6, %2  : i32
+      %29 = llvm.add %13, %28  : i32
+      %30 = llvm.sext %27 : i32 to i64
+      %31 = builtin.unrealized_conversion_cast %30 : i64 to index
+      %32 = llvm.sext %29 : i32 to i64
+      %33 = builtin.unrealized_conversion_cast %32 : i64 to index
+      %34 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %35 = llvm.bitcast %34 : i32 to vector<2xf16>
+      vector.store %35, %arg0[%31, %33] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After CSE (cse) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+      %0 = llvm.mlir.constant(32 : i32) : i32
+      %1 = llvm.mlir.constant(16 : i32) : i32
+      %2 = llvm.mlir.constant(8 : i32) : i32
+      %3 = llvm.mlir.constant(4 : i32) : i32
+      %4 = llvm.mlir.constant(2 : i32) : i32
+      %5 = llvm.mlir.constant(1 : i32) : i32
+      %6 = llvm.mlir.constant(0 : i32) : i32
+      memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+      %8 = nvvm.read.ptx.sreg.tid.x : i32
+      %9 = llvm.urem %8, %0  : i32
+      %10 = llvm.udiv %8, %0  : i32
+      %11 = llvm.udiv %9, %3  : i32
+      %12 = llvm.urem %9, %3  : i32
+      %13 = llvm.mul %12, %4  : i32
+      %14 = llvm.mul %10, %1  : i32
+      %15 = llvm.add %11, %14  : i32
+      %16 = llvm.mul %6, %2  : i32
+      %17 = llvm.add %15, %16  : i32
+      %18 = llvm.add %13, %16  : i32
+      %19 = llvm.sext %17 : i32 to i64
+      %20 = builtin.unrealized_conversion_cast %19 : i64 to index
+      %21 = llvm.sext %18 : i32 to i64
+      %22 = builtin.unrealized_conversion_cast %21 : i64 to index
+      %23 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %24 = llvm.bitcast %23 : i32 to vector<2xf16>
+      vector.store %24, %arg0[%20, %22] : memref<64x32xf16, 3>, vector<2xf16>
+      %25 = llvm.mul %5, %2  : i32
+      %26 = llvm.add %15, %25  : i32
+      %27 = llvm.sext %26 : i32 to i64
+      %28 = builtin.unrealized_conversion_cast %27 : i64 to index
+      %29 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %30 = llvm.bitcast %29 : i32 to vector<2xf16>
+      vector.store %30, %arg0[%28, %22] : memref<64x32xf16, 3>, vector<2xf16>
+      gpu.return
+    }
+  }
+}
+
+
+// -----// IR Dump After StripDebugInfo (strip-debuginfo) //----- //
+gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+  gpu.func @foo(%arg0: memref<64x32xf16, 3>, %arg1: i64, %arg2: i64) {
+    %0 = llvm.mlir.constant(32 : i32) : i32
+    %1 = llvm.mlir.constant(16 : i32) : i32
+    %2 = llvm.mlir.constant(8 : i32) : i32
+    %3 = llvm.mlir.constant(4 : i32) : i32
+    %4 = llvm.mlir.constant(2 : i32) : i32
+    %5 = llvm.mlir.constant(1 : i32) : i32
+    %6 = llvm.mlir.constant(0 : i32) : i32
+    memref.assume_alignment %arg0, 32 : memref<64x32xf16, 3>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+    %7 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %6, %6, %6, %6, %6, %6, %6, %6, %arg1, %arg2, %5, %5, %5, %6, %5 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %5 : (i32) -> ()
+    %8 = nvvm.read.ptx.sreg.tid.x : i32
+    %9 = llvm.urem %8, %0  : i32
+    %10 = llvm.udiv %8, %0  : i32
+    %11 = llvm.udiv %9, %3  : i32
+    %12 = llvm.urem %9, %3  : i32
+    %13 = llvm.mul %12, %4  : i32
+    %14 = llvm.mul %10, %1  : i32
+    %15 = llvm.add %11, %14  : i32
+    %16 = llvm.mul %6, %2  : i32
+    %17 = llvm.add %15, %16  : i32
+    %18 = llvm.add %13, %16  : i32
+    %19 = llvm.sext %17 : i32 to i64
+    %20 = builtin.unrealized_conversion_cast %19 : i64 to index
+    %21 = llvm.sext %18 : i32 to i64
+    %22 = builtin.unrealized_conversion_cast %21 : i64 to index
+    %23 = llvm.extractvalue %7[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %24 = llvm.bitcast %23 : i32 to vector<2xf16>
+    vector.store %24, %arg0[%20, %22] : memref<64x32xf16, 3>, vector<2xf16>
+    %25 = llvm.mul %5, %2  : i32
+    %26 = llvm.add %15, %25  : i32
+    %27 = llvm.sext %26 : i32 to i64
+    %28 = builtin.unrealized_conversion_cast %27 : i64 to index
+    %29 = llvm.extractvalue %7[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %30 = llvm.bitcast %29 : i32 to vector<2xf16>
+    vector.store %30, %arg0[%28, %22] : memref<64x32xf16, 3>, vector<2xf16>
+    gpu.return
+  }
+}
+
+// -----// IR Dump After ConvertGpuOpsToNVVMOps (convert-gpu-to-nvvm) //----- //
+gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+  llvm.func @foo(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<3>, %arg2: i64, %arg3: i64, %arg4: i64, %arg5: i64, %arg6: i64, %arg7: i64, %arg8: i64) {
+    %0 = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
+    %1 = llvm.insertvalue %arg0, %0[0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %2 = llvm.insertvalue %arg1, %1[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %3 = llvm.insertvalue %arg2, %2[2] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %4 = llvm.insertvalue %arg3, %3[3, 0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %5 = llvm.insertvalue %arg5, %4[4, 0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %6 = llvm.insertvalue %arg4, %5[3, 1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %7 = llvm.insertvalue %arg6, %6[4, 1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %8 = llvm.mlir.constant(32 : i32) : i32
+    %9 = llvm.mlir.constant(16 : i32) : i32
+    %10 = llvm.mlir.constant(8 : i32) : i32
+    %11 = llvm.mlir.constant(4 : i32) : i32
+    %12 = llvm.mlir.constant(2 : i32) : i32
+    %13 = llvm.mlir.constant(1 : i32) : i32
+    %14 = llvm.mlir.constant(0 : i32) : i32
+    %15 = llvm.extractvalue %7[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %16 = llvm.mlir.constant(0 : index) : i64
+    %17 = llvm.mlir.constant(31 : index) : i64
+    %18 = llvm.ptrtoint %15 : !llvm.ptr<3> to i64
+    %19 = llvm.and %18, %17  : i64
+    %20 = llvm.icmp "eq" %19, %16 : i64
+    "llvm.intr.assume"(%20) : (i1) -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+    %21 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %14, %14, %14, %14, %14, %14, %14, %14, %arg7, %arg8, %13, %13, %13, %14, %13 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %13 : (i32) -> ()
+    %22 = nvvm.read.ptx.sreg.tid.x : i32
+    %23 = llvm.urem %22, %8  : i32
+    %24 = llvm.udiv %22, %8  : i32
+    %25 = llvm.udiv %23, %11  : i32
+    %26 = llvm.urem %23, %11  : i32
+    %27 = llvm.mul %26, %12  : i32
+    %28 = llvm.mul %24, %9  : i32
+    %29 = llvm.add %25, %28  : i32
+    %30 = llvm.mul %14, %10  : i32
+    %31 = llvm.add %29, %30  : i32
+    %32 = llvm.add %27, %30  : i32
+    %33 = llvm.sext %31 : i32 to i64
+    %34 = builtin.unrealized_conversion_cast %33 : i64 to index
+    %35 = llvm.sext %32 : i32 to i64
+    %36 = builtin.unrealized_conversion_cast %35 : i64 to index
+    %37 = llvm.extractvalue %21[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %38 = llvm.bitcast %37 : i32 to vector<2xf16>
+    %39 = llvm.extractvalue %7[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %40 = llvm.mlir.constant(32 : index) : i64
+    %41 = llvm.mul %33, %40  : i64
+    %42 = llvm.add %41, %35  : i64
+    %43 = llvm.getelementptr %39[%42] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %38, %43 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    %44 = llvm.mul %13, %10  : i32
+    %45 = llvm.add %29, %44  : i32
+    %46 = llvm.sext %45 : i32 to i64
+    %47 = builtin.unrealized_conversion_cast %46 : i64 to index
+    %48 = llvm.extractvalue %21[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %49 = llvm.bitcast %48 : i32 to vector<2xf16>
+    %50 = llvm.extractvalue %7[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)> 
+    %51 = llvm.mlir.constant(32 : index) : i64
+    %52 = llvm.mul %46, %51  : i64
+    %53 = llvm.add %52, %35  : i64
+    %54 = llvm.getelementptr %50[%53] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %49, %54 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    llvm.return
+  }
+}
+
+// -----// IR Dump After Canonicalizer (canonicalize) //----- //
+gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+  llvm.func @foo(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<3>, %arg2: i64, %arg3: i64, %arg4: i64, %arg5: i64, %arg6: i64, %arg7: i64, %arg8: i64) {
+    %0 = llvm.mlir.constant(32 : index) : i64
+    %1 = llvm.mlir.constant(31 : index) : i64
+    %2 = llvm.mlir.constant(0 : index) : i64
+    %3 = llvm.mlir.constant(0 : i32) : i32
+    %4 = llvm.mlir.constant(1 : i32) : i32
+    %5 = llvm.mlir.constant(2 : i32) : i32
+    %6 = llvm.mlir.constant(4 : i32) : i32
+    %7 = llvm.mlir.constant(8 : i32) : i32
+    %8 = llvm.mlir.constant(16 : i32) : i32
+    %9 = llvm.mlir.constant(32 : i32) : i32
+    %10 = llvm.ptrtoint %arg1 : !llvm.ptr<3> to i64
+    %11 = llvm.and %10, %1  : i64
+    %12 = llvm.icmp "eq" %11, %2 : i64
+    "llvm.intr.assume"(%12) : (i1) -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+    %13 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %3, %3, %3, %3, %3, %3, %3, %3, %arg7, %arg8, %4, %4, %4, %3, %4 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %4 : (i32) -> ()
+    %14 = nvvm.read.ptx.sreg.tid.x : i32
+    %15 = llvm.urem %14, %9  : i32
+    %16 = llvm.udiv %14, %9  : i32
+    %17 = llvm.udiv %15, %6  : i32
+    %18 = llvm.urem %15, %6  : i32
+    %19 = llvm.mul %18, %5  : i32
+    %20 = llvm.mul %16, %8  : i32
+    %21 = llvm.add %17, %20  : i32
+    %22 = llvm.mul %3, %7  : i32
+    %23 = llvm.add %21, %22  : i32
+    %24 = llvm.add %19, %22  : i32
+    %25 = llvm.sext %23 : i32 to i64
+    %26 = llvm.sext %24 : i32 to i64
+    %27 = llvm.extractvalue %13[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %28 = llvm.bitcast %27 : i32 to vector<2xf16>
+    %29 = llvm.mul %25, %0  : i64
+    %30 = llvm.add %29, %26  : i64
+    %31 = llvm.getelementptr %arg1[%30] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %28, %31 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    %32 = llvm.mul %4, %7  : i32
+    %33 = llvm.add %21, %32  : i32
+    %34 = llvm.sext %33 : i32 to i64
+    %35 = llvm.extractvalue %13[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %36 = llvm.bitcast %35 : i32 to vector<2xf16>
+    %37 = llvm.mul %34, %0  : i64
+    %38 = llvm.add %37, %26  : i64
+    %39 = llvm.getelementptr %arg1[%38] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %36, %39 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    llvm.return
+  }
+}
+
+// -----// IR Dump After CSE (cse) //----- //
+gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+  llvm.func @foo(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<3>, %arg2: i64, %arg3: i64, %arg4: i64, %arg5: i64, %arg6: i64, %arg7: i64, %arg8: i64) {
+    %0 = llvm.mlir.constant(32 : index) : i64
+    %1 = llvm.mlir.constant(31 : index) : i64
+    %2 = llvm.mlir.constant(0 : index) : i64
+    %3 = llvm.mlir.constant(0 : i32) : i32
+    %4 = llvm.mlir.constant(1 : i32) : i32
+    %5 = llvm.mlir.constant(2 : i32) : i32
+    %6 = llvm.mlir.constant(4 : i32) : i32
+    %7 = llvm.mlir.constant(8 : i32) : i32
+    %8 = llvm.mlir.constant(16 : i32) : i32
+    %9 = llvm.mlir.constant(32 : i32) : i32
+    %10 = llvm.ptrtoint %arg1 : !llvm.ptr<3> to i64
+    %11 = llvm.and %10, %1  : i64
+    %12 = llvm.icmp "eq" %11, %2 : i64
+    "llvm.intr.assume"(%12) : (i1) -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+    %13 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %3, %3, %3, %3, %3, %3, %3, %3, %arg7, %arg8, %4, %4, %4, %3, %4 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %4 : (i32) -> ()
+    %14 = nvvm.read.ptx.sreg.tid.x : i32
+    %15 = llvm.urem %14, %9  : i32
+    %16 = llvm.udiv %14, %9  : i32
+    %17 = llvm.udiv %15, %6  : i32
+    %18 = llvm.urem %15, %6  : i32
+    %19 = llvm.mul %18, %5  : i32
+    %20 = llvm.mul %16, %8  : i32
+    %21 = llvm.add %17, %20  : i32
+    %22 = llvm.mul %3, %7  : i32
+    %23 = llvm.add %21, %22  : i32
+    %24 = llvm.add %19, %22  : i32
+    %25 = llvm.sext %23 : i32 to i64
+    %26 = llvm.sext %24 : i32 to i64
+    %27 = llvm.extractvalue %13[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %28 = llvm.bitcast %27 : i32 to vector<2xf16>
+    %29 = llvm.mul %25, %0  : i64
+    %30 = llvm.add %29, %26  : i64
+    %31 = llvm.getelementptr %arg1[%30] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %28, %31 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    %32 = llvm.mul %4, %7  : i32
+    %33 = llvm.add %21, %32  : i32
+    %34 = llvm.sext %33 : i32 to i64
+    %35 = llvm.extractvalue %13[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %36 = llvm.bitcast %35 : i32 to vector<2xf16>
+    %37 = llvm.mul %34, %0  : i64
+    %38 = llvm.add %37, %26  : i64
+    %39 = llvm.getelementptr %arg1[%38] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %36, %39 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    llvm.return
+  }
+}
+
+// -----// IR Dump After ReconcileUnrealizedCasts (reconcile-unrealized-casts) //----- //
+gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+  llvm.func @foo(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<3>, %arg2: i64, %arg3: i64, %arg4: i64, %arg5: i64, %arg6: i64, %arg7: i64, %arg8: i64) {
+    %0 = llvm.mlir.constant(32 : index) : i64
+    %1 = llvm.mlir.constant(31 : index) : i64
+    %2 = llvm.mlir.constant(0 : index) : i64
+    %3 = llvm.mlir.constant(0 : i32) : i32
+    %4 = llvm.mlir.constant(1 : i32) : i32
+    %5 = llvm.mlir.constant(2 : i32) : i32
+    %6 = llvm.mlir.constant(4 : i32) : i32
+    %7 = llvm.mlir.constant(8 : i32) : i32
+    %8 = llvm.mlir.constant(16 : i32) : i32
+    %9 = llvm.mlir.constant(32 : i32) : i32
+    %10 = llvm.ptrtoint %arg1 : !llvm.ptr<3> to i64
+    %11 = llvm.and %10, %1  : i64
+    %12 = llvm.icmp "eq" %11, %2 : i64
+    "llvm.intr.assume"(%12) : (i1) -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+    %13 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %3, %3, %3, %3, %3, %3, %3, %3, %arg7, %arg8, %4, %4, %4, %3, %4 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+    llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %4 : (i32) -> ()
+    %14 = nvvm.read.ptx.sreg.tid.x : i32
+    %15 = llvm.urem %14, %9  : i32
+    %16 = llvm.udiv %14, %9  : i32
+    %17 = llvm.udiv %15, %6  : i32
+    %18 = llvm.urem %15, %6  : i32
+    %19 = llvm.mul %18, %5  : i32
+    %20 = llvm.mul %16, %8  : i32
+    %21 = llvm.add %17, %20  : i32
+    %22 = llvm.mul %3, %7  : i32
+    %23 = llvm.add %21, %22  : i32
+    %24 = llvm.add %19, %22  : i32
+    %25 = llvm.sext %23 : i32 to i64
+    %26 = llvm.sext %24 : i32 to i64
+    %27 = llvm.extractvalue %13[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %28 = llvm.bitcast %27 : i32 to vector<2xf16>
+    %29 = llvm.mul %25, %0  : i64
+    %30 = llvm.add %29, %26  : i64
+    %31 = llvm.getelementptr %arg1[%30] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %28, %31 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    %32 = llvm.mul %4, %7  : i32
+    %33 = llvm.add %21, %32  : i32
+    %34 = llvm.sext %33 : i32 to i64
+    %35 = llvm.extractvalue %13[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+    %36 = llvm.bitcast %35 : i32 to vector<2xf16>
+    %37 = llvm.mul %34, %0  : i64
+    %38 = llvm.add %37, %26  : i64
+    %39 = llvm.getelementptr %arg1[%38] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+    llvm.store %36, %39 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+    llvm.return
+  }
+}
+
+// -----// IR Dump After GpuToLLVMConversionPass (gpu-to-llvm) //----- //
+module {
+  gpu.module @asd [#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">]  {
+    llvm.func @foo(%arg0: !llvm.ptr<3>, %arg1: !llvm.ptr<3>, %arg2: i64, %arg3: i64, %arg4: i64, %arg5: i64, %arg6: i64, %arg7: i64, %arg8: i64) {
+      %0 = llvm.mlir.constant(32 : index) : i64
+      %1 = llvm.mlir.constant(31 : index) : i64
+      %2 = llvm.mlir.constant(0 : index) : i64
+      %3 = llvm.mlir.constant(0 : i32) : i32
+      %4 = llvm.mlir.constant(1 : i32) : i32
+      %5 = llvm.mlir.constant(2 : i32) : i32
+      %6 = llvm.mlir.constant(4 : i32) : i32
+      %7 = llvm.mlir.constant(8 : i32) : i32
+      %8 = llvm.mlir.constant(16 : i32) : i32
+      %9 = llvm.mlir.constant(32 : i32) : i32
+      %10 = llvm.ptrtoint %arg1 : !llvm.ptr<3> to i64
+      %11 = llvm.and %10, %1  : i64
+      %12 = llvm.icmp "eq" %11, %2 : i64
+      "llvm.intr.assume"(%12) : (i1) -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.fence.sync.aligned;", ""  : () -> ()
+      %13 = llvm.inline_asm has_side_effects asm_dialect = att "{\0A.reg .pred p;\0Asetp.ne.b32 p, $18, 0;\0Awgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {$0, $1, $2, $3, $4, $5, $6, $7}, $16, $17, p, $19,  $20, $21,  $22;\0A}\0A", "=r,=r,=r,=r,=r,=r,=r,=r,0,1,2,3,4,5,6,7,l,l,n,n,n,n,n" %3, %3, %3, %3, %3, %3, %3, %3, %arg7, %arg8, %4, %4, %4, %3, %4 : (i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i32, i32, i32, i32, i32) -> !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)>
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.commit_group.sync.aligned;", ""  : () -> ()
+      llvm.inline_asm has_side_effects asm_dialect = att "wgmma.wait_group.sync.aligned $0;", "n" %4 : (i32) -> ()
+      %14 = nvvm.read.ptx.sreg.tid.x : i32
+      %15 = llvm.urem %14, %9  : i32
+      %16 = llvm.udiv %14, %9  : i32
+      %17 = llvm.udiv %15, %6  : i32
+      %18 = llvm.urem %15, %6  : i32
+      %19 = llvm.mul %18, %5  : i32
+      %20 = llvm.mul %16, %8  : i32
+      %21 = llvm.add %17, %20  : i32
+      %22 = llvm.mul %3, %7  : i32
+      %23 = llvm.add %21, %22  : i32
+      %24 = llvm.add %19, %22  : i32
+      %25 = llvm.sext %23 : i32 to i64
+      %26 = llvm.sext %24 : i32 to i64
+      %27 = llvm.extractvalue %13[0] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %28 = llvm.bitcast %27 : i32 to vector<2xf16>
+      %29 = llvm.mul %25, %0  : i64
+      %30 = llvm.add %29, %26  : i64
+      %31 = llvm.getelementptr %arg1[%30] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+      llvm.store %28, %31 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+      %32 = llvm.mul %4, %7  : i32
+      %33 = llvm.add %21, %32  : i32
+      %34 = llvm.sext %33 : i32 to i64
+      %35 = llvm.extractvalue %13[2] : !llvm.struct<(i32, i32, i32, i32, i32, i32, i32, i32)> 
+      %36 = llvm.bitcast %35 : i32 to vector<2xf16>
+      %37 = llvm.mul %34, %0  : i64
+      %38 = llvm.add %37, %26  : i64
+      %39 = llvm.getelementptr %arg1[%38] : (!llvm.ptr<3>, i64) -> !llvm.ptr<3>, f16
+      llvm.store %36, %39 {alignment = 2 : i64} : vector<2xf16>, !llvm.ptr<3>
+      llvm.return
+    }
+  }
+}
+
+
+PTX for module: "asd"
+//
+// Generated by LLVM NVPTX Back-End
+//
+
+.version 8.0
+.target sm_90a
+.address_size 64
+
+	// .globl	foo
+
+.visible .func foo(
+	.param .b64 foo_param_0,
+	.param .b64 foo_param_1,
+	.param .b64 foo_param_2,
+	.param .b64 foo_param_3,
+	.param .b64 foo_param_4,
+	.param .b64 foo_param_5,
+	.param .b64 foo_param_6,
+	.param .b64 foo_param_7,
+	.param .b64 foo_param_8
+)
+{
+	.reg .b32 	%r<25>;
+	.reg .b64 	%rd<13>;
+
+	// begin inline asm
+	wgmma.fence.sync.aligned;
+	// end inline asm
+	ld.param.u64 	%rd3, [foo_param_1];
+	ld.param.u64 	%rd1, [foo_param_7];
+	ld.param.u64 	%rd2, [foo_param_8];
+	mov.b32 	%r1, 0;
+	mov.u32 	%r2, %r1;
+	mov.u32 	%r3, %r1;
+	mov.u32 	%r4, %r1;
+	mov.u32 	%r5, %r1;
+	mov.u32 	%r6, %r1;
+	mov.u32 	%r7, %r1;
+	mov.u32 	%r8, %r1;
+	// begin inline asm
+	{
+.reg .pred p;
+setp.ne.b32 p, 1, 0;
+wgmma.mma_async.sync.aligned.m64n32k16.f16.f16.f16 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, %rd1, %rd2, p, 1,  1, 0,  1;
+}
+
+	// end inline asm
+	// begin inline asm
+	wgmma.commit_group.sync.aligned;
+	// end inline asm
+	// begin inline asm
+	wgmma.wait_group.sync.aligned 1;
+	// end inline asm
+	mov.u32 	%r17, %tid.x;
+	bfe.u32 	%r18, %r17, 2, 3;
+	shl.b32 	%r19, %r17, 1;
+	and.b32  	%r20, %r19, 6;
+	shr.u32 	%r21, %r17, 1;
+	and.b32  	%r22, %r21, 2147483632;
+	or.b32  	%r23, %r18, %r22;
+	cvt.u64.u32 	%rd4, %r20;
+	mul.wide.u32 	%rd5, %r23, 32;
+	or.b64  	%rd6, %rd5, %rd4;
+	shl.b64 	%rd7, %rd6, 1;
+	add.s64 	%rd8, %rd3, %rd7;
+	st.shared.u32 	[%rd8], %r1;
+	or.b32  	%r24, %r23, 8;
+	mul.wide.u32 	%rd9, %r24, 32;
+	or.b64  	%rd10, %rd9, %rd4;
+	shl.b64 	%rd11, %rd10, 1;
+	add.s64 	%rd12, %rd3, %rd11;
+	st.shared.u32 	[%rd12], %r3;
+	ret;
+
+}
+
+// -----// IR Dump After GpuModuleToBinaryPass (gpu-module-to-binary) //----- //
+module {
+  gpu.binary @asd  [#gpu.object<#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">, "P\EDU\BA\01\00\10\00\F0\05\00\00\00\00\00\00\02\00\01\01@\00\00\00\B0\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\07\00\01\00Z\00\00\00\00\00\00\00\00\00\00\00\11\00\10\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\7FELF\02\01\013\07\00\00\00\00\00\00\00\02\00\BE\00{\00\00\00\00\00\00\00\00\00\00\00@\02\00\00\00\00\00\00\00\01\00\00\00\00\00\00Z\0DZ\00@\008\00\02\00@\00\05\00\01\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00@\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\0B\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\9B\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\13\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\E8\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00\02\00\00\00\01\00\00\00\08\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00@\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\01\01H\00\00\00\B8\02\00\00\00\00\00\00\B2\02\00\00@\00\00\00\00\00\08\00Z\00\00\00\00\00\00\00\00\00\00\00\11 \10\00\00\00\00\00\00\00\00\00\00\00\00\00A\05\00\00\00\00\00\00\00\00\00\00\00\00\00\00\F2!\0A\0A\0A\0A.version 8.0\0A.target sm_90a\0A.address_size 640\00\F0\0Eisible .func foo(\0A.param .b64\11\00\11_\0F\00?_0,\19\00\04\1F1\19\00\05\1F2\19\00\05\1F3\19\00\05\1F4\19\00\05\1F5\19\00\05\1F6\19\00\05\1F7\19\00\05\F4\088\0A)\0A{\0A.reg .b32 %r<25>;\12\00\F2\1E64 %rd<13>;\0A\0A\0A\09wgmma.fence.sync.aligned;\0A\0A\09ldb\00\22.u5\00F3, [h\00L1];\0A\22\00\191\22\00\1F7\22\00\00\192\22\00s8];\0Amov\A8\00B1, 0\10\00\11u\10\00 2,\15\00\08\12\00\1D3\12\00\1D4\12\00\1D5\12\00\1D6\12\00\1D7\12\00\138\12\00$\0A\09<\01\F1\00pred p;\0Asetp.ne\A4\002p, \A5\00\027\01Qmma_a6\01\08;\01\C6.m64n32k16.f\04\00! {\E1\00\03\D6\00$3,\BC\00$5,\A2\00\01\95\0038},I\01\02-\01\02r\00\00u\00`, 1;\0A}\B4\01\04\B6\01\CDcommit_group\BD\01\04%\00/wa#\00\02\11 \EC\00\06\00\01\101~\00\A4tid.x;\0Abfe\16\00#8,\1C\00\942, 3;\0Ashl\AC\01\149\1A\00c1;\0Aand\17\00320,\1D\00\106.\00\13rH\00/21.\00\03#2,\1D\00\E42147483632;\0Aor\1F\00%3,\84\00p22;\0AcvtG\02\03T\00\11d?\01\1022\02dul.wid\B5\003d5,=\00\04N\00\02{\02%6,\1D\00#d4\CC\00\02\1A\00#7, \00\00\9E\00Bdd.s\17\00#8,\F6\02\00#\00\B1;\0Ast.sharedh\00\10[!\00\13]6\02\06\B3\00\144|\00\1E8\98\00#9,\22\00\0A\98\00510,\1E\00\0B\99\00411,\22\00\0A\9B\00'12\9C\00/11\9D\00\01!12\9E\00\C03;\0Aret;\0A\0A}\0A\00\00\00\00\00\00\00">]
+}
+
+
+// -----// IR Dump After Canonicalizer (canonicalize) //----- //
+module {
+  gpu.binary @asd  [#gpu.object<#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">, "P\EDU\BA\01\00\10\00\F0\05\00\00\00\00\00\00\02\00\01\01@\00\00\00\B0\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\07\00\01\00Z\00\00\00\00\00\00\00\00\00\00\00\11\00\10\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\7FELF\02\01\013\07\00\00\00\00\00\00\00\02\00\BE\00{\00\00\00\00\00\00\00\00\00\00\00@\02\00\00\00\00\00\00\00\01\00\00\00\00\00\00Z\0DZ\00@\008\00\02\00@\00\05\00\01\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00@\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\0B\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\9B\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\13\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\E8\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00\02\00\00\00\01\00\00\00\08\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00@\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\01\01H\00\00\00\B8\02\00\00\00\00\00\00\B2\02\00\00@\00\00\00\00\00\08\00Z\00\00\00\00\00\00\00\00\00\00\00\11 \10\00\00\00\00\00\00\00\00\00\00\00\00\00A\05\00\00\00\00\00\00\00\00\00\00\00\00\00\00\F2!\0A\0A\0A\0A.version 8.0\0A.target sm_90a\0A.address_size 640\00\F0\0Eisible .func foo(\0A.param .b64\11\00\11_\0F\00?_0,\19\00\04\1F1\19\00\05\1F2\19\00\05\1F3\19\00\05\1F4\19\00\05\1F5\19\00\05\1F6\19\00\05\1F7\19\00\05\F4\088\0A)\0A{\0A.reg .b32 %r<25>;\12\00\F2\1E64 %rd<13>;\0A\0A\0A\09wgmma.fence.sync.aligned;\0A\0A\09ldb\00\22.u5\00F3, [h\00L1];\0A\22\00\191\22\00\1F7\22\00\00\192\22\00s8];\0Amov\A8\00B1, 0\10\00\11u\10\00 2,\15\00\08\12\00\1D3\12\00\1D4\12\00\1D5\12\00\1D6\12\00\1D7\12\00\138\12\00$\0A\09<\01\F1\00pred p;\0Asetp.ne\A4\002p, \A5\00\027\01Qmma_a6\01\08;\01\C6.m64n32k16.f\04\00! {\E1\00\03\D6\00$3,\BC\00$5,\A2\00\01\95\0038},I\01\02-\01\02r\00\00u\00`, 1;\0A}\B4\01\04\B6\01\CDcommit_group\BD\01\04%\00/wa#\00\02\11 \EC\00\06\00\01\101~\00\A4tid.x;\0Abfe\16\00#8,\1C\00\942, 3;\0Ashl\AC\01\149\1A\00c1;\0Aand\17\00320,\1D\00\106.\00\13rH\00/21.\00\03#2,\1D\00\E42147483632;\0Aor\1F\00%3,\84\00p22;\0AcvtG\02\03T\00\11d?\01\1022\02dul.wid\B5\003d5,=\00\04N\00\02{\02%6,\1D\00#d4\CC\00\02\1A\00#7, \00\00\9E\00Bdd.s\17\00#8,\F6\02\00#\00\B1;\0Ast.sharedh\00\10[!\00\13]6\02\06\B3\00\144|\00\1E8\98\00#9,\22\00\0A\98\00510,\1E\00\0B\99\00411,\22\00\0A\9B\00'12\9C\00/11\9D\00\01!12\9E\00\C03;\0Aret;\0A\0A}\0A\00\00\00\00\00\00\00">]
+}
+
+
+// -----// IR Dump After CSE (cse) //----- //
+module {
+  gpu.binary @asd  [#gpu.object<#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">, "P\EDU\BA\01\00\10\00\F0\05\00\00\00\00\00\00\02\00\01\01@\00\00\00\B0\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\07\00\01\00Z\00\00\00\00\00\00\00\00\00\00\00\11\00\10\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\7FELF\02\01\013\07\00\00\00\00\00\00\00\02\00\BE\00{\00\00\00\00\00\00\00\00\00\00\00@\02\00\00\00\00\00\00\00\01\00\00\00\00\00\00Z\0DZ\00@\008\00\02\00@\00\05\00\01\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00@\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\0B\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\9B\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\13\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\E8\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00\02\00\00\00\01\00\00\00\08\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00@\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\01\01H\00\00\00\B8\02\00\00\00\00\00\00\B2\02\00\00@\00\00\00\00\00\08\00Z\00\00\00\00\00\00\00\00\00\00\00\11 \10\00\00\00\00\00\00\00\00\00\00\00\00\00A\05\00\00\00\00\00\00\00\00\00\00\00\00\00\00\F2!\0A\0A\0A\0A.version 8.0\0A.target sm_90a\0A.address_size 640\00\F0\0Eisible .func foo(\0A.param .b64\11\00\11_\0F\00?_0,\19\00\04\1F1\19\00\05\1F2\19\00\05\1F3\19\00\05\1F4\19\00\05\1F5\19\00\05\1F6\19\00\05\1F7\19\00\05\F4\088\0A)\0A{\0A.reg .b32 %r<25>;\12\00\F2\1E64 %rd<13>;\0A\0A\0A\09wgmma.fence.sync.aligned;\0A\0A\09ldb\00\22.u5\00F3, [h\00L1];\0A\22\00\191\22\00\1F7\22\00\00\192\22\00s8];\0Amov\A8\00B1, 0\10\00\11u\10\00 2,\15\00\08\12\00\1D3\12\00\1D4\12\00\1D5\12\00\1D6\12\00\1D7\12\00\138\12\00$\0A\09<\01\F1\00pred p;\0Asetp.ne\A4\002p, \A5\00\027\01Qmma_a6\01\08;\01\C6.m64n32k16.f\04\00! {\E1\00\03\D6\00$3,\BC\00$5,\A2\00\01\95\0038},I\01\02-\01\02r\00\00u\00`, 1;\0A}\B4\01\04\B6\01\CDcommit_group\BD\01\04%\00/wa#\00\02\11 \EC\00\06\00\01\101~\00\A4tid.x;\0Abfe\16\00#8,\1C\00\942, 3;\0Ashl\AC\01\149\1A\00c1;\0Aand\17\00320,\1D\00\106.\00\13rH\00/21.\00\03#2,\1D\00\E42147483632;\0Aor\1F\00%3,\84\00p22;\0AcvtG\02\03T\00\11d?\01\1022\02dul.wid\B5\003d5,=\00\04N\00\02{\02%6,\1D\00#d4\CC\00\02\1A\00#7, \00\00\9E\00Bdd.s\17\00#8,\F6\02\00#\00\B1;\0Ast.sharedh\00\10[!\00\13]6\02\06\B3\00\144|\00\1E8\98\00#9,\22\00\0A\98\00510,\1E\00\0B\99\00411,\22\00\0A\9B\00'12\9C\00/11\9D\00\01!12\9E\00\C03;\0Aret;\0A\0A}\0A\00\00\00\00\00\00\00">]
+}
+
+
+// -----// IR Dump After ReconcileUnrealizedCasts (reconcile-unrealized-casts) //----- //
+module {
+  gpu.binary @asd  [#gpu.object<#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">, "P\EDU\BA\01\00\10\00\F0\05\00\00\00\00\00\00\02\00\01\01@\00\00\00\B0\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\07\00\01\00Z\00\00\00\00\00\00\00\00\00\00\00\11\00\10\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\7FELF\02\01\013\07\00\00\00\00\00\00\00\02\00\BE\00{\00\00\00\00\00\00\00\00\00\00\00@\02\00\00\00\00\00\00\00\01\00\00\00\00\00\00Z\0DZ\00@\008\00\02\00@\00\05\00\01\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00@\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\0B\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\9B\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\13\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\E8\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00\02\00\00\00\01\00\00\00\08\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00@\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\01\01H\00\00\00\B8\02\00\00\00\00\00\00\B2\02\00\00@\00\00\00\00\00\08\00Z\00\00\00\00\00\00\00\00\00\00\00\11 \10\00\00\00\00\00\00\00\00\00\00\00\00\00A\05\00\00\00\00\00\00\00\00\00\00\00\00\00\00\F2!\0A\0A\0A\0A.version 8.0\0A.target sm_90a\0A.address_size 640\00\F0\0Eisible .func foo(\0A.param .b64\11\00\11_\0F\00?_0,\19\00\04\1F1\19\00\05\1F2\19\00\05\1F3\19\00\05\1F4\19\00\05\1F5\19\00\05\1F6\19\00\05\1F7\19\00\05\F4\088\0A)\0A{\0A.reg .b32 %r<25>;\12\00\F2\1E64 %rd<13>;\0A\0A\0A\09wgmma.fence.sync.aligned;\0A\0A\09ldb\00\22.u5\00F3, [h\00L1];\0A\22\00\191\22\00\1F7\22\00\00\192\22\00s8];\0Amov\A8\00B1, 0\10\00\11u\10\00 2,\15\00\08\12\00\1D3\12\00\1D4\12\00\1D5\12\00\1D6\12\00\1D7\12\00\138\12\00$\0A\09<\01\F1\00pred p;\0Asetp.ne\A4\002p, \A5\00\027\01Qmma_a6\01\08;\01\C6.m64n32k16.f\04\00! {\E1\00\03\D6\00$3,\BC\00$5,\A2\00\01\95\0038},I\01\02-\01\02r\00\00u\00`, 1;\0A}\B4\01\04\B6\01\CDcommit_group\BD\01\04%\00/wa#\00\02\11 \EC\00\06\00\01\101~\00\A4tid.x;\0Abfe\16\00#8,\1C\00\942, 3;\0Ashl\AC\01\149\1A\00c1;\0Aand\17\00320,\1D\00\106.\00\13rH\00/21.\00\03#2,\1D\00\E42147483632;\0Aor\1F\00%3,\84\00p22;\0AcvtG\02\03T\00\11d?\01\1022\02dul.wid\B5\003d5,=\00\04N\00\02{\02%6,\1D\00#d4\CC\00\02\1A\00#7, \00\00\9E\00Bdd.s\17\00#8,\F6\02\00#\00\B1;\0Ast.sharedh\00\10[!\00\13]6\02\06\B3\00\144|\00\1E8\98\00#9,\22\00\0A\98\00510,\1E\00\0B\99\00411,\22\00\0A\9B\00'12\9C\00/11\9D\00\01!12\9E\00\C03;\0Aret;\0A\0A}\0A\00\00\00\00\00\00\00">]
+}
+
+
+module {
+  gpu.binary @asd  [#gpu.object<#nvvm.target<O = 3, chip = "sm_90a", features = "+ptx80">, "P\EDU\BA\01\00\10\00\F0\05\00\00\00\00\00\00\02\00\01\01@\00\00\00\B0\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\07\00\01\00Z\00\00\00\00\00\00\00\00\00\00\00\11\00\10\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\7FELF\02\01\013\07\00\00\00\00\00\00\00\02\00\BE\00{\00\00\00\00\00\00\00\00\00\00\00@\02\00\00\00\00\00\00\00\01\00\00\00\00\00\00Z\0DZ\00@\008\00\02\00@\00\05\00\01\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00.shstrtab\00.strtab\00.symtab\00.symtab_shndx\00.nv.uft.entry\00.nv.info\00.debug_frame\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00@\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\0B\00\00\00\03\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\9B\00\00\00\00\00\00\00M\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\13\00\00\00\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\E8\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00\02\00\00\00\01\00\00\00\08\00\00\00\00\00\00\00\18\00\00\00\00\00\00\00@\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\01\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\06\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\00\00\04\00\00\00@\02\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00p\00\00\00\00\00\00\00p\00\00\00\00\00\00\00\08\00\00\00\00\00\00\00\01\00\01\01H\00\00\00\B8\02\00\00\00\00\00\00\B2\02\00\00@\00\00\00\00\00\08\00Z\00\00\00\00\00\00\00\00\00\00\00\11 \10\00\00\00\00\00\00\00\00\00\00\00\00\00A\05\00\00\00\00\00\00\00\00\00\00\00\00\00\00\F2!\0A\0A\0A\0A.version 8.0\0A.target sm_90a\0A.address_size 640\00\F0\0Eisible .func foo(\0A.param .b64\11\00\11_\0F\00?_0,\19\00\04\1F1\19\00\05\1F2\19\00\05\1F3\19\00\05\1F4\19\00\05\1F5\19\00\05\1F6\19\00\05\1F7\19\00\05\F4\088\0A)\0A{\0A.reg .b32 %r<25>;\12\00\F2\1E64 %rd<13>;\0A\0A\0A\09wgmma.fence.sync.aligned;\0A\0A\09ldb\00\22.u5\00F3, [h\00L1];\0A\22\00\191\22\00\1F7\22\00\00\192\22\00s8];\0Amov\A8\00B1, 0\10\00\11u\10\00 2,\15\00\08\12\00\1D3\12\00\1D4\12\00\1D5\12\00\1D6\12\00\1D7\12\00\138\12\00$\0A\09<\01\F1\00pred p;\0Asetp.ne\A4\002p, \A5\00\027\01Qmma_a6\01\08;\01\C6.m64n32k16.f\04\00! {\E1\00\03\D6\00$3,\BC\00$5,\A2\00\01\95\0038},I\01\02-\01\02r\00\00u\00`, 1;\0A}\B4\01\04\B6\01\CDcommit_group\BD\01\04%\00/wa#\00\02\11 \EC\00\06\00\01\101~\00\A4tid.x;\0Abfe\16\00#8,\1C\00\942, 3;\0Ashl\AC\01\149\1A\00c1;\0Aand\17\00320,\1D\00\106.\00\13rH\00/21.\00\03#2,\1D\00\E42147483632;\0Aor\1F\00%3,\84\00p22;\0AcvtG\02\03T\00\11d?\01\1022\02dul.wid\B5\003d5,=\00\04N\00\02{\02%6,\1D\00#d4\CC\00\02\1A\00#7, \00\00\9E\00Bdd.s\17\00#8,\F6\02\00#\00\B1;\0Ast.sharedh\00\10[!\00\13]6\02\06\B3\00\144|\00\1E8\98\00#9,\22\00\0A\98\00510,\1E\00\0B\99\00411,\22\00\0A\9B\00'12\9C\00/11\9D\00\01!12\9E\00\C03;\0Aret;\0A\0A}\0A\00\00\00\00\00\00\00">]
+}
+

>From acfbfc06bdba5a4bf22f00971bedffc019972141 Mon Sep 17 00:00:00 2001
From: Guray Ozen <guray.ozen at gmail.com>
Date: Thu, 18 Jan 2024 10:49:46 +0100
Subject: [PATCH 3/3] add nl

---
 mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
index 7c55059f4a84dc8..029f1469b1c7133 100644
--- a/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
+++ b/mlir/test/Dialect/GPU/test-nvvm-pipeline.mlir
@@ -15,4 +15,4 @@ func.func @test_math(%arg0 : f32) {
         gpu.terminator
     }
     return
-}
\ No newline at end of file
+}


