[Mlir-commits] [mlir] [mlir][spirv] Update argmax-kernel lit test for `convert-to-spirv` (PR #106586)

Angel Zhang llvmlistbot at llvm.org
Thu Aug 29 10:07:42 PDT 2024


https://github.com/angelz913 created https://github.com/llvm/llvm-project/pull/106586

This PR updates the `argmax-kernel` lit test under `ConvertToSPIRV`, which demonstrates mixing SPIR-V ops with other higher-level dialects. The original test landed in #105010 but had a few mistakes, such as hard-coding values that should be runtime quantities (e.g. the subgroup size and the total number of input elements). This PR fixes those issues so that the kernel module can also be used in integration tests.
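For context, the gist of the change (taken from the patch below) is to query these values at runtime instead of baking them in as constants:

```mlir
// Before: subgroup size and element count were hard-coded constants.
%cst_32 = arith.constant 32 : i32
%num_batches = arith.divui %cst_1_i32, %cst_32 : i32

// After: query the subgroup size and load the element count from a buffer
// passed as an extra kernel argument (%total_count_buf).
%lane_count_idx = gpu.subgroup_size : index
%lane_count_i32 = index.castu %lane_count_idx : index to i32
%total_count = memref.load %total_count_buf[%idx_0] : memref<1xi32>
%num_batches_i32 = arith.divui %total_count, %lane_count_i32 : i32
```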

From 6eed79a7727ff962bfcf5f8324cb9c23a2002ca3 Mon Sep 17 00:00:00 2001
From: Angel Zhang <angel.zhang at amd.com>
Date: Thu, 29 Aug 2024 16:53:41 +0000
Subject: [PATCH] [mlir][spirv] Update argmax-kernel lit test for
 convert-to-spirv

---
 .../ConvertToSPIRV/argmax-kernel.mlir         | 57 +++++++++++--------
 1 file changed, 32 insertions(+), 25 deletions(-)

diff --git a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
index 5cd1fead2527b1..8b1c3e54cbfb83 100644
--- a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
+++ b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
@@ -5,43 +5,50 @@ module attributes {
   spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader, Groups, GroupNonUniformArithmetic, GroupNonUniformBallot], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
   // CHECK-LABEL: spirv.module @{{.*}} Logical GLSL450
-  // CHECK-DAG: spirv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spirv.ptr<vector<3xi32>, Input>
+  // CHECK-DAG:   spirv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spirv.ptr<vector<3xi32>, Input>
+  // CHECK-DAG:   spirv.GlobalVariable @[[$SUBGROUPSIZE:.*]] built_in("SubgroupSize") : !spirv.ptr<i32, Input>
   // CHECK-LABEL: spirv.func @argmax
-  // CHECK-SAME: %[[ARG0:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>
-  // CHECK-SAME: %[[ARG1:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>
+  // CHECK-SAME:  %[[ARG0:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<128 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0)>}
+  // CHECK-SAME:  %[[ARG1:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 1)>}
+  // CHECK-SAME:  %[[ARG2:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 2)>}
   gpu.module @kernels {
-    gpu.func @argmax(%input : memref<4xf32>, %output : memref<i32>) kernel
+    gpu.func @argmax(%input : memref<128xf32>, %output : memref<1xi32>, %total_count_buf : memref<1xi32>) kernel
       attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [32, 1, 1]>} {
       // CHECK: %[[C0:.*]] = spirv.Constant 0 : i32
       // CHECK: %[[C1:.*]] = spirv.Constant 1 : i32
-      // CHECK: %[[C32:.*]] = spirv.Constant 32 : i32
+      // CHECK: %[[ADDRESSSUBGROUPSIZE:.*]] = spirv.mlir.addressof @[[$SUBGROUPSIZE]]
+      // CHECK: %[[SUBGROUPSIZE:.*]] = spirv.Load "Input" %[[ADDRESSSUBGROUPSIZE]]
       // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spirv.mlir.addressof @[[$LOCALINVOCATIONIDVAR]]
       // CHECK: %[[LOCALINVOCATIONID:.*]] = spirv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
       // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spirv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
-      // CHECK: %[[AC0:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[LOCALINVOCATIONIDX]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+      // CHECK: %[[AC:.*]] = spirv.AccessChain %[[ARG2]][%[[C0]], %[[C0]]] : !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>, i32, i32
+      // CHECK: %[[LOAD:.*]] = spirv.Load "StorageBuffer" %[[AC]] : i32
+      // CHECK: %[[AC0:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[LOCALINVOCATIONIDX]]] : !spirv.ptr<!spirv.struct<(!spirv.array<128 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
       // CHECK: %[[LOAD0:.*]] = spirv.Load "StorageBuffer" %[[AC0]] : f32
+      // CHECK: %[[UB:.*]] = spirv.UDiv %[[LOAD]], %[[SUBGROUPSIZE]] : i32
       // CHECK: %[[FUNC0:.*]] = spirv.Variable : !spirv.ptr<i32, Function>
       // CHECK: %[[FUNC1:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
-      %cst_0_idx = arith.constant 0 : index
-      %cst_1_i32 = arith.constant 1 : i32
-      %cst_1_idx = arith.constant 1 : index
-      %cst_32 = arith.constant 32 : i32
-      %num_batches = arith.divui %cst_1_i32, %cst_32 : i32
-      %tx = gpu.thread_id x
-      %tx_i32 = index.castu %tx : index to i32
-      %ub = index.castu %num_batches : i32 to index
+      %idx_0 = arith.constant 0 : index
+      %idx_1 = arith.constant 1 : index
+      %lane_count_idx = gpu.subgroup_size : index
+      %lane_count_i32 = index.castu %lane_count_idx : index to i32
+      %lane_id_idx = gpu.thread_id x
+      %lane_id_i32 = index.castu %lane_id_idx : index to i32
+      %total_count = memref.load %total_count_buf[%idx_0] : memref<1xi32>
       %lane_res_init = arith.constant 0 : i32
-      %lane_max_init = memref.load %input[%tx] : memref<4xf32>
+      %lane_max_init = memref.load %input[%lane_id_idx] : memref<128xf32>
+      %num_batches_i32 = arith.divui %total_count, %lane_count_i32 : i32
+      %num_batches_idx = index.castu %num_batches_i32 : i32 to index
 
       // CHECK: spirv.mlir.loop {
       // CHECK:   spirv.Branch ^[[HEADER:.*]](%[[C1]], %[[C0]], %[[LOAD0]] : i32, i32, f32)
       // CHECK: ^[[HEADER]](%[[INDVAR0:.*]]: i32, %[[INDVAR1:.*]]: i32, %[[INDVAR2:.*]]: f32):
-      // CHECK:   %[[SLT:.*]] = spirv.SLessThan %[[INDVAR0]], %[[C0]] : i32
+      // CHECK:   %[[SLT:.*]] = spirv.SLessThan %[[INDVAR0]], %[[UB]] : i32
       // CHECK:   spirv.BranchConditional %[[SLT]], ^[[BODY:.*]], ^[[MERGE:.*]]
       // CHECK: ^[[BODY]]:
-      // CHECK:   %[[MUL:.*]] = spirv.IMul %[[INDVAR0]], %[[C32]] : i32
+      // CHECK:   %[[MUL:.*]] = spirv.IMul %[[SUBGROUPSIZE]], %[[INDVAR0]] : i32
       // CHECK:   %[[ADD:.*]] = spirv.IAdd %[[MUL]], %[[LOCALINVOCATIONIDX]] : i32
-      // CHECK:   %[[AC1:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[ADD]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+      // CHECK:   %[[AC1:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[ADD]]] : !spirv.ptr<!spirv.struct<(!spirv.array<128 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
       // CHECK:   %[[LOAD1:.*]] = spirv.Load "StorageBuffer" %[[AC1]] : f32
       // CHECK:   %[[OGT:.*]] = spirv.FOrdGreaterThan %[[LOAD1]], %[[INDVAR2]] : f32
       // CHECK:   %[[SELECT0:.*]] = spirv.Select %[[OGT]], %[[ADD]], %[[INDVAR1]] : i1, i32
@@ -55,13 +62,13 @@ module attributes {
       // CHECK: }
       // CHECK-DAG: %[[LANE_RES:.*]] = spirv.Load "Function" %[[FUNC0]] : i32
       // CHECK-DAG: %[[LANE_MAX:.*]] = spirv.Load "Function" %[[FUNC1]] : f32
-      %lane_res, %lane_max = scf.for %iter = %cst_1_idx to %ub step %cst_1_idx
+      %lane_res, %lane_max = scf.for %iter = %idx_1 to %num_batches_idx step %idx_1
       iter_args(%lane_res_iter = %lane_res_init, %lane_max_iter = %lane_max_init) -> (i32, f32) {
         %iter_i32 = index.castu %iter : index to i32
-        %mul = arith.muli %cst_32, %iter_i32 : i32
-        %idx_i32 = arith.addi %mul, %tx_i32 : i32
+        %mul = arith.muli %lane_count_i32, %iter_i32 : i32
+        %idx_i32 = arith.addi %mul, %lane_id_i32 : i32
         %idx = index.castu %idx_i32 : i32 to index
-        %elem = memref.load %input[%idx] : memref<4xf32>
+        %elem = memref.load %input[%idx] : memref<128xf32>
         %gt = arith.cmpf ogt, %elem, %lane_max_iter : f32
         %lane_res_next = arith.select %gt, %idx_i32, %lane_res_iter : i32
         %lane_max_next = arith.select %gt, %elem, %lane_max_iter : f32
@@ -72,12 +79,12 @@ module attributes {
       // CHECK: %[[OEQ:.*]] = spirv.FOrdEqual %[[LANE_MAX]], %[[SUBGROUP_MAX]] : f32
       // CHECK: %[[BALLOT:.*]] = spirv.GroupNonUniformBallot <Subgroup> %[[OEQ]] : vector<4xi32>
       // CHECK: %[[BALLOTLSB:.*]] = spirv.GroupNonUniformBallotFindLSB <Subgroup> %[[BALLOT]] : vector<4xi32>, i32
-      // CHECK: %[[EQ:.*]] = spirv.IEqual %[[LOCALINVOCATIONIDX]], %[[C1]] : i32
+      // CHECK: %[[EQ:.*]] = spirv.IEqual %[[BALLOTLSB]], %[[LOCALINVOCATIONIDX]] : i32
       %subgroup_max = gpu.subgroup_reduce maximumf %lane_max : (f32) -> (f32)
       %eq = arith.cmpf oeq, %lane_max, %subgroup_max : f32
       %ballot = spirv.GroupNonUniformBallot <Subgroup> %eq : vector<4xi32>
       %lsb = spirv.GroupNonUniformBallotFindLSB <Subgroup> %ballot : vector<4xi32>, i32
-      %cond = arith.cmpi eq, %cst_1_i32, %tx_i32 : i32
+      %cond = arith.cmpi eq, %lsb, %lane_id_i32 : i32
 
       // CHECK: spirv.mlir.selection {
       // CHECK:   spirv.BranchConditional %[[EQ]], ^[[TRUE:.*]], ^[[FALSE:.*]]
@@ -89,7 +96,7 @@ module attributes {
       // CHECK:   spirv.mlir.merge
       // CHECK: }
       scf.if %cond {
-        memref.store %lane_res, %output[] : memref<i32>
+        memref.store %lane_res, %output[%idx_0] : memref<1xi32>
       }
 
       // CHECK: spirv.Return
