[Mlir-commits] [mlir] 39e1c1f - Add GPU lowerings for the different log ops.

Adrian Kuegel llvmlistbot at llvm.org
Thu Feb 27 06:33:30 PST 2020


Author: Adrian Kuegel
Date: 2020-02-27T15:25:02+01:00
New Revision: 39e1c1fa9ee03e91751e505d747275e58069e6de

URL: https://github.com/llvm/llvm-project/commit/39e1c1fa9ee03e91751e505d747275e58069e6de
DIFF: https://github.com/llvm/llvm-project/commit/39e1c1fa9ee03e91751e505d747275e58069e6de.diff

LOG: Add GPU lowerings for the different log ops.

Summary: This adds GPU lowerings for log, log10 and log2.

Reviewers: mravishankar, herhut

Subscribers: jholewinski, mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, nicolasvasilache, csigg, arpith-jacob, mgester, lucyrfox, liufengdb, Joonsoo, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75239

Added: 
    

Modified: 
    mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
    mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
    mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
    mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index bb7e18762a6d..a2f16b1103e9 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -681,8 +681,8 @@ class LowerGpuOpsToNVVMOpsPass
     populateGpuToNVVMConversionPatterns(converter, patterns);
     ConversionTarget target(getContext());
     target.addIllegalDialect<gpu::GPUDialect>();
-    target.addIllegalOp<LLVM::FAbsOp, LLVM::FCeilOp, LLVM::CosOp,
-                        LLVM::ExpOp>();
+    target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::FAbsOp, LLVM::FCeilOp,
+                        LLVM::LogOp, LLVM::Log10Op, LLVM::Log2Op>();
     target.addIllegalOp<FuncOp>();
     target.addLegalDialect<LLVM::LLVMDialect>();
     target.addLegalDialect<NVVM::NVVMDialect>();
@@ -719,6 +719,12 @@ void mlir::populateGpuToNVVMConversionPatterns(
                                                "__nv_cos");
   patterns.insert<OpToFuncCallLowering<ExpOp>>(converter, "__nv_expf",
                                                "__nv_exp");
+  patterns.insert<OpToFuncCallLowering<LogOp>>(converter, "__nv_logf",
+                                               "__nv_log");
+  patterns.insert<OpToFuncCallLowering<Log10Op>>(converter, "__nv_log10f",
+                                                 "__nv_log10");
+  patterns.insert<OpToFuncCallLowering<Log2Op>>(converter, "__nv_log2f",
+                                                "__nv_log2");
   patterns.insert<OpToFuncCallLowering<TanhOp>>(converter, "__nv_tanhf",
                                                 "__nv_tanh");
 }

diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 0fd8be01f15a..238821ec8dc3 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -58,13 +58,19 @@ class LowerGpuOpsToROCDLOpsPass
                                                  "__ocml_cos_f64");
     patterns.insert<OpToFuncCallLowering<ExpOp>>(converter, "__ocml_exp_f32",
                                                  "__ocml_exp_f64");
+    patterns.insert<OpToFuncCallLowering<LogOp>>(converter, "__ocml_log_f32",
+                                                 "__ocml_log_f64");
+    patterns.insert<OpToFuncCallLowering<Log10Op>>(
+        converter, "__ocml_log10_f32", "__ocml_log10_f64");
+    patterns.insert<OpToFuncCallLowering<Log2Op>>(converter, "__ocml_log2_f32",
+                                                  "__ocml_log2_f64");
     patterns.insert<OpToFuncCallLowering<TanhOp>>(converter, "__ocml_tanh_f32",
                                                   "__ocml_tanh_f64");
 
     ConversionTarget target(getContext());
     target.addLegalDialect<LLVM::LLVMDialect, ROCDL::ROCDLDialect>();
-    target.addIllegalOp<LLVM::FAbsOp, LLVM::FCeilOp, LLVM::CosOp,
-                        LLVM::ExpOp>();
+    target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::FAbsOp, LLVM::FCeilOp,
+                        LLVM::LogOp, LLVM::Log10Op, LLVM::Log2Op>();
     target.addDynamicallyLegalOp<LLVM::CallOp>(
         gpu::filterIllegalLLVMIntrinsics({"tanh", "tanhf"}, m.getContext()));
     target.addIllegalOp<FuncOp>();

diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index b27ee0345a07..4ae9d8022bc3 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -154,21 +154,6 @@ gpu.module @test_module {
   }
 }
 
-// -----
-
-gpu.module @test_module {
-  // CHECK: llvm.func @__nv_tanhf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_tanh(!llvm.double) -> !llvm.double
-  // CHECK-LABEL: func @gpu_tanh
-  func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) {
-    %result32 = std.tanh %arg_f32 : f32
-    // CHECK: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float
-    %result64 = std.tanh %arg_f64 : f64
-    // CHECK: llvm.call @__nv_tanh(%{{.*}}) : (!llvm.double) -> !llvm.double
-    std.return
-  }
-}
-
 // -----
 gpu.module @test_module {
   // CHECK: llvm.func @__nv_expf(!llvm.float) -> !llvm.float
@@ -187,6 +172,66 @@ gpu.module @test_module {
 
 // -----
 
+gpu.module @test_module {
+  // CHECK: llvm.func @__nv_logf(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__nv_log(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log
+  func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log %arg_f32 : f32
+    // CHECK: llvm.call @__nv_logf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log %arg_f64 : f64
+    // CHECK: llvm.call @__nv_log(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @test_module {
+  // CHECK: llvm.func @__nv_log10f(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__nv_log10(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log10
+  func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log10 %arg_f32 : f32
+    // CHECK: llvm.call @__nv_log10f(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log10 %arg_f64 : f64
+    // CHECK: llvm.call @__nv_log10(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @test_module {
+  // CHECK: llvm.func @__nv_log2f(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__nv_log2(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log2
+  func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log2 %arg_f32 : f32
+    // CHECK: llvm.call @__nv_log2f(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log2 %arg_f64 : f64
+    // CHECK: llvm.call @__nv_log2(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @test_module {
+  // CHECK: llvm.func @__nv_tanhf(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__nv_tanh(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_tanh
+  func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.tanh %arg_f32 : f32
+    // CHECK: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.tanh %arg_f64 : f64
+    // CHECK: llvm.call @__nv_tanh(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
 // Test that we handled properly operation with SymbolTable other than module op
 gpu.module @test_module {
   "test.symbol_scope"() ({

diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
index b733e9b6aadb..7400d4f0bb1e 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -81,21 +81,6 @@ gpu.module @kernel_module {
   }
 }
 
-// -----
-
-gpu.module @kernel_module {
-  // CHECK: llvm.func @__ocml_tanh_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_tanh_f64(!llvm.double) -> !llvm.double
-  // CHECK-LABEL: func @gpu_tanh
-  func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) {
-    %result32 = std.tanh %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
-    %result64 = std.tanh %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
-    std.return
-  }
-}
-
 // -----
 gpu.module @kernel_module {
   // CHECK: llvm.func @__ocml_exp_f32(!llvm.float) -> !llvm.float
@@ -134,3 +119,63 @@ gpu.module @kernel_module {
     "test.finish" () : () -> ()
   }) : () -> ()
 }
+
+// -----
+
+gpu.module @kernel_module {
+  // CHECK: llvm.func @__ocml_log_f32(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__ocml_log_f64(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log
+  func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log %arg_f32 : f32
+    // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log %arg_f64 : f64
+    // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @kernel_module {
+  // CHECK: llvm.func @__ocml_log10_f32(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__ocml_log10_f64(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log10
+  func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log10 %arg_f32 : f32
+    // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log10 %arg_f64 : f64
+    // CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @kernel_module {
+  // CHECK: llvm.func @__ocml_log2_f32(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__ocml_log2_f64(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_log2
+  func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.log2 %arg_f32 : f32
+    // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.log2 %arg_f64 : f64
+    // CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}
+
+// -----
+
+gpu.module @kernel_module {
+  // CHECK: llvm.func @__ocml_tanh_f32(!llvm.float) -> !llvm.float
+  // CHECK: llvm.func @__ocml_tanh_f64(!llvm.double) -> !llvm.double
+  // CHECK-LABEL: func @gpu_tanh
+  func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) {
+    %result32 = std.tanh %arg_f32 : f32
+    // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    %result64 = std.tanh %arg_f64 : f64
+    // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    std.return
+  }
+}


        


More information about the Mlir-commits mailing list