[Mlir-commits] [mlir] befd167 - [mlir][gpu] Fix cuda integration tests

Ivan Butygin llvmlistbot at llvm.org
Wed Dec 14 05:01:47 PST 2022


Author: Ivan Butygin
Date: 2022-12-14T14:01:00+01:00
New Revision: befd167050ce46517a799cf0f997e2f75c20eb3b

URL: https://github.com/llvm/llvm-project/commit/befd167050ce46517a799cf0f997e2f75c20eb3b
DIFF: https://github.com/llvm/llvm-project/commit/befd167050ce46517a799cf0f997e2f75c20eb3b.diff

LOG: [mlir][gpu] Fix cuda integration tests

https://reviews.llvm.org/D138758 added a `uniform` flag to the gpu reduce ops; update the integration tests accordingly.

Differential Revision: https://reviews.llvm.org/D140014
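
For reference, a minimal before/after sketch of the change, using the `and` reduction from all-reduce-and.mlir below:

    // Before D138758: no uniformity marker on the reduction.
    %reduced = gpu.all_reduce and %val {} : (i32) -> (i32)
    // After: the op now carries the `uniform` keyword.
    %reduced = gpu.all_reduce and %val uniform {} : (i32) -> (i32)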

Added: 
    

Modified: 
    mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
    mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
    mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir

Removed: 
    


################################################################################
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
index 0f61f1ebcbd6..8f51eb061a26 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
@@ -55,7 +55,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xi32>
-    %reduced = gpu.all_reduce and %val {} : (i32) -> (i32)
+    %reduced = gpu.all_reduce and %val uniform {} : (i32) -> (i32)
     memref.store %reduced, %sum[%bx] : memref<2xi32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
index 03948f6ee958..7b15e5c5e424 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
@@ -55,7 +55,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xi32>
-    %reduced = gpu.all_reduce max %val {} : (i32) -> (i32)
+    %reduced = gpu.all_reduce max %val uniform {} : (i32) -> (i32)
     memref.store %reduced, %sum[%bx] : memref<2xi32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
index 5e1127ebcce5..e2b27fc9f818 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
@@ -55,7 +55,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xi32>
-    %reduced = gpu.all_reduce min %val {} : (i32) -> (i32)
+    %reduced = gpu.all_reduce min %val uniform {} : (i32) -> (i32)
     memref.store %reduced, %sum[%bx] : memref<2xi32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
index 92f6a804ece1..05b2730a7cf3 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
@@ -28,7 +28,7 @@ func.func @main() {
     %idx = arith.addi %tx, %t2 : index
     %t3 = arith.index_cast %idx : index to i32
     %val = arith.sitofp %t3 : i32 to f32
-    %sum = gpu.all_reduce add %val {} : (f32) -> (f32)
+    %sum = gpu.all_reduce add %val uniform {} : (f32) -> (f32)
     memref.store %sum, %dst[%tz, %ty, %tx] : memref<?x?x?xf32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
index 3b988e29cb39..075d62534fb4 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
@@ -55,7 +55,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xi32>
-    %reduced = gpu.all_reduce or %val {} : (i32) -> (i32)
+    %reduced = gpu.all_reduce or %val uniform {} : (i32) -> (i32)
     memref.store %reduced, %sum[%bx] : memref<2xi32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
index 485bdcd5d0a3..b92cbce78461 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
@@ -20,7 +20,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %one, %grid_y = %one, %grid_z = %one)
              threads(%tx, %ty, %tz) in (%block_x = %sx, %block_y = %one, %block_z = %one) {
     %val = arith.index_cast %tx : index to i32
-    %xor = gpu.all_reduce %val {
+    %xor = gpu.all_reduce %val uniform {
     ^bb(%lhs : i32, %rhs : i32):
       %xor = arith.xori %lhs, %rhs : i32
       "gpu.yield"(%xor) : (i32) -> ()

diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
index eac5ecfc5b44..445de97c64ef 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
@@ -55,7 +55,7 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xi32>
-    %reduced = gpu.all_reduce xor %val {} : (i32) -> (i32)
+    %reduced = gpu.all_reduce xor %val uniform {} : (i32) -> (i32)
     memref.store %reduced, %sum[%bx] : memref<2xi32>
     gpu.terminator
   }

diff --git a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
index c980f39c3dbb..eeabee1cf478 100644
--- a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
+++ b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
@@ -58,9 +58,9 @@ func.func @main() {
   gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c2, %grid_y = %c1, %grid_z = %c1)
              threads(%tx, %ty, %tz) in (%block_x = %c6, %block_y = %c1, %block_z = %c1) {
     %val = memref.load %data[%bx, %tx] : memref<2x6xf32>
-    %reduced0 = gpu.all_reduce add %val {} : (f32) -> (f32)
+    %reduced0 = gpu.all_reduce add %val uniform {} : (f32) -> (f32)
     memref.store %reduced0, %sum[%bx] : memref<2xf32>
-    %reduced1 = gpu.all_reduce mul %val {} : (f32) -> (f32)
+    %reduced1 = gpu.all_reduce mul %val uniform {} : (f32) -> (f32)
     memref.store %reduced1, %mul[%bx] : memref<2xf32>
     gpu.terminator
   }


        

