[Mlir-commits] [mlir] 54759ce - [mlir] [VectorOps] changes to printing support for integers

Aart Bik llvmlistbot at llvm.org
Mon Sep 28 11:43:46 PDT 2020


Author: Aart Bik
Date: 2020-09-28T11:43:31-07:00
New Revision: 54759cefdba929c89a0bde07df19d8946312a974

URL: https://github.com/llvm/llvm-project/commit/54759cefdba929c89a0bde07df19d8946312a974
DIFF: https://github.com/llvm/llvm-project/commit/54759cefdba929c89a0bde07df19d8946312a974.diff

LOG: [mlir] [VectorOps] changes to printing support for integers

(1) simplify the integer printing logic by always printing through a 64-bit entry point (a rough sketch of the resulting lowering follows below)
(2) add index support (since vector<16xindex> is planned to be added)
(3) rename the runtime entry points from print_x to printX for a consistent naming convention
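A rough sketch of the resulting lowering, taken from the updated vector-to-llvm.mlir test expectations further down (nothing beyond what the diff itself shows): a 32-bit signless integer is now sign-extended to i64 and printed through the single 64-bit signed entry point, and the new index case goes straight to the unsigned 64-bit printer.

  // vector.print %arg0 : i32 now lowers to (roughly):
  %s = llvm.sext %arg0 : !llvm.i32 to !llvm.i64
  llvm.call @printI64(%s) : (!llvm.i64) -> ()
  llvm.call @printNewline() : () -> ()

  // vector.print %arg0 : index lowers to:
  llvm.call @printU64(%arg0) : (!llvm.i64) -> ()
  llvm.call @printNewline() : () -> ()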

Reviewed By: bkramer

Differential Revision: https://reviews.llvm.org/D88436

Added: 
    

Modified: 
    mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
    mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
    mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
    mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
    mlir/lib/ExecutionEngine/CRunnerUtils.cpp
    mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
    mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
    mlir/test/mlir-cpu-runner/unranked_memref.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
index 3f369b066fbe..b64c1498f309 100644
--- a/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/CRunnerUtils.h
@@ -200,14 +200,14 @@ class DynamicMemRefType {
 //===----------------------------------------------------------------------===//
 // Small runtime support "lib" for vector.print lowering during codegen.
 //===----------------------------------------------------------------------===//
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_i32(int32_t i);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_i64(int64_t l);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_f32(float f);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_f64(double d);
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_open();
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_close();
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_comma();
-extern "C" MLIR_CRUNNERUTILS_EXPORT void print_newline();
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printI64(int64_t i);
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u);
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printF32(float f);
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printF64(double d);
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printOpen();
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printClose();
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printComma();
+extern "C" MLIR_CRUNNERUTILS_EXPORT void printNewline();
 
 #endif // EXECUTIONENGINE_CRUNNERUTILS_H_
 

diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
index 2f17dbe4455f..eef0c9d51185 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
@@ -4,8 +4,8 @@
 
 // End-to-end test of all fp reduction intrinsics (not exhaustive unit tests).
 module {
-  llvm.func @print_newline()
-  llvm.func @print_f32(!llvm.float)
+  llvm.func @printNewline()
+  llvm.func @printF32(!llvm.float)
   llvm.func @entry() {
     // Setup (1,2,3,4).
     %0 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
@@ -26,62 +26,62 @@ module {
 
     %max = "llvm.intr.experimental.vector.reduce.fmax"(%v)
         : (!llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%max) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%max) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     %min = "llvm.intr.experimental.vector.reduce.fmin"(%v)
         : (!llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%min) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%min) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
     %add1 = "llvm.intr.experimental.vector.reduce.v2.fadd"(%0, %v)
         : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%add1) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%add1) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 11
 
     %add1r = "llvm.intr.experimental.vector.reduce.v2.fadd"(%0, %v)
         {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%add1r) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%add1r) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 11
 
     %add2 = "llvm.intr.experimental.vector.reduce.v2.fadd"(%1, %v)
         : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%add2) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%add2) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 12
 
     %add2r = "llvm.intr.experimental.vector.reduce.v2.fadd"(%1, %v)
         {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%add2r) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%add2r) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 12
 
     %mul1 = "llvm.intr.experimental.vector.reduce.v2.fmul"(%0, %v)
         : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%mul1) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%mul1) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %mul1r = "llvm.intr.experimental.vector.reduce.v2.fmul"(%0, %v)
         {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%mul1r) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%mul1r) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %mul2 = "llvm.intr.experimental.vector.reduce.v2.fmul"(%1, %v)
         : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%mul2) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%mul2) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 48
 
     %mul2r = "llvm.intr.experimental.vector.reduce.v2.fmul"(%1, %v)
         {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @print_f32(%mul2r) : (!llvm.float) -> ()
-    llvm.call @print_newline() : () -> ()
+    llvm.call @printF32(%mul2r) : (!llvm.float) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 48
 
     llvm.return

diff --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
index 227c00a08ad8..e4d578e36e23 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-int.mlir
@@ -4,78 +4,78 @@
 
 // End-to-end test of all int reduction intrinsics (not exhaustive unit tests).
 module {
-  llvm.func @print_newline()
-  llvm.func @print_i32(!llvm.i32)
+  llvm.func @printNewline()
+  llvm.func @printI64(!llvm.i64)
   llvm.func @entry() {
     // Setup (1,2,3,4).
-    %0 = llvm.mlir.constant(1 : i32) : !llvm.i32
-    %1 = llvm.mlir.constant(2 : i32) : !llvm.i32
-    %2 = llvm.mlir.constant(3 : i32) : !llvm.i32
-    %3 = llvm.mlir.constant(4 : i32) : !llvm.i32
-    %4 = llvm.mlir.undef : !llvm.vec<4 x i32>
+    %0 = llvm.mlir.constant(1 : i64) : !llvm.i64
+    %1 = llvm.mlir.constant(2 : i64) : !llvm.i64
+    %2 = llvm.mlir.constant(3 : i64) : !llvm.i64
+    %3 = llvm.mlir.constant(4 : i64) : !llvm.i64
+    %4 = llvm.mlir.undef : !llvm.vec<4 x i64>
     %5 = llvm.mlir.constant(0 : index) : !llvm.i64
-    %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x i32>
-    %7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32]
-        : !llvm.vec<4 x i32>, !llvm.vec<4 x i32>
+    %6 = llvm.insertelement %0, %4[%5 : !llvm.i64] : !llvm.vec<4 x i64>
+    %7 = llvm.shufflevector %6, %4 [0 : i64, 0 : i64, 0 : i64, 0 : i64]
+        : !llvm.vec<4 x i64>, !llvm.vec<4 x i64>
     %8 = llvm.mlir.constant(1 : i64) : !llvm.i64
-    %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x i32>
+    %9 = llvm.insertelement %1, %7[%8 : !llvm.i64] : !llvm.vec<4 x i64>
     %10 = llvm.mlir.constant(2 : i64) : !llvm.i64
-    %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x i32>
+    %11 = llvm.insertelement %2, %9[%10 : !llvm.i64] : !llvm.vec<4 x i64>
     %12 = llvm.mlir.constant(3 : i64) : !llvm.i64
-    %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x i32>
+    %v = llvm.insertelement %3, %11[%12 : !llvm.i64] : !llvm.vec<4 x i64>
 
     %add = "llvm.intr.experimental.vector.reduce.add"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%add) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%add) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 10
 
     %and = "llvm.intr.experimental.vector.reduce.and"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%and) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%and) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 0
 
     %mul = "llvm.intr.experimental.vector.reduce.mul"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%mul) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%mul) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %or = "llvm.intr.experimental.vector.reduce.or"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%or) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%or) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 7
 
     %smax = "llvm.intr.experimental.vector.reduce.smax"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%smax) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%smax) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     %smin = "llvm.intr.experimental.vector.reduce.smin"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%smin) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%smin) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
     %umax = "llvm.intr.experimental.vector.reduce.umax"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%umax) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%umax) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     %umin = "llvm.intr.experimental.vector.reduce.umin"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%umin) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%umin) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
     %xor = "llvm.intr.experimental.vector.reduce.xor"(%v)
-        : (!llvm.vec<4 x i32>) -> !llvm.i32
-    llvm.call @print_i32(%xor) : (!llvm.i32) -> ()
-    llvm.call @print_newline() : () -> ()
+        : (!llvm.vec<4 x i64>) -> !llvm.i64
+    llvm.call @printI64(%xor) : (!llvm.i64) -> ()
+    llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     llvm.return

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index b48b435d0278..c3c102111dc6 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1328,17 +1328,15 @@ class VectorPrintOpConversion : public ConvertToLLVMPattern {
       printer = getPrintFloat(op);
     } else if (eltType.isF64()) {
       printer = getPrintDouble(op);
+    } else if (eltType.isIndex()) {
+      printer = getPrintU64(op);
     } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
       // Integers need a zero or sign extension on the operand
       // (depending on the source type) as well as a signed or
       // unsigned print method. Up to 64-bit is supported.
       unsigned width = intTy.getWidth();
       if (intTy.isUnsigned()) {
-        if (width <= 32) {
-          if (width < 32)
-            conversion = PrintConversion::ZeroExt32;
-          printer = getPrintU32(op);
-        } else if (width <= 64) {
+        if (width <= 64) {
           if (width < 64)
             conversion = PrintConversion::ZeroExt64;
           printer = getPrintU64(op);
@@ -1347,16 +1345,12 @@ class VectorPrintOpConversion : public ConvertToLLVMPattern {
         }
       } else {
         assert(intTy.isSignless() || intTy.isSigned());
-        if (width <= 32) {
+        if (width <= 64) {
           // Note that we *always* zero extend booleans (1-bit integers),
           // so that true/false is printed as 1/0 rather than -1/0.
           if (width == 1)
-            conversion = PrintConversion::ZeroExt32;
-          else if (width < 32)
-            conversion = PrintConversion::SignExt32;
-          printer = getPrintI32(op);
-        } else if (width <= 64) {
-          if (width < 64)
+            conversion = PrintConversion::ZeroExt64;
+          else if (width < 64)
             conversion = PrintConversion::SignExt64;
           printer = getPrintI64(op);
         } else {
@@ -1379,8 +1373,6 @@ class VectorPrintOpConversion : public ConvertToLLVMPattern {
 private:
   enum class PrintConversion {
     None,
-    ZeroExt32,
-    SignExt32,
     ZeroExt64,
     SignExt64
   };
@@ -1391,14 +1383,6 @@ class VectorPrintOpConversion : public ConvertToLLVMPattern {
     Location loc = op->getLoc();
     if (rank == 0) {
       switch (conversion) {
-      case PrintConversion::ZeroExt32:
-        value = rewriter.create<ZeroExtendIOp>(
-            loc, value, LLVM::LLVMType::getInt32Ty(rewriter.getContext()));
-        break;
-      case PrintConversion::SignExt32:
-        value = rewriter.create<SignExtendIOp>(
-            loc, value, LLVM::LLVMType::getInt32Ty(rewriter.getContext()));
-        break;
       case PrintConversion::ZeroExt64:
         value = rewriter.create<ZeroExtendIOp>(
             loc, value, LLVM::LLVMType::getInt64Ty(rewriter.getContext()));
@@ -1455,41 +1439,33 @@ class VectorPrintOpConversion : public ConvertToLLVMPattern {
   }
 
   // Helpers for method names.
-  Operation *getPrintI32(Operation *op) const {
-    return getPrint(op, "print_i32",
-                    LLVM::LLVMType::getInt32Ty(op->getContext()));
-  }
   Operation *getPrintI64(Operation *op) const {
-    return getPrint(op, "print_i64",
+    return getPrint(op, "printI64",
                     LLVM::LLVMType::getInt64Ty(op->getContext()));
   }
-  Operation *getPrintU32(Operation *op) const {
-    return getPrint(op, "printU32",
-                    LLVM::LLVMType::getInt32Ty(op->getContext()));
-  }
   Operation *getPrintU64(Operation *op) const {
     return getPrint(op, "printU64",
                     LLVM::LLVMType::getInt64Ty(op->getContext()));
   }
   Operation *getPrintFloat(Operation *op) const {
-    return getPrint(op, "print_f32",
+    return getPrint(op, "printF32",
                     LLVM::LLVMType::getFloatTy(op->getContext()));
   }
   Operation *getPrintDouble(Operation *op) const {
-    return getPrint(op, "print_f64",
+    return getPrint(op, "printF64",
                     LLVM::LLVMType::getDoubleTy(op->getContext()));
   }
   Operation *getPrintOpen(Operation *op) const {
-    return getPrint(op, "print_open", {});
+    return getPrint(op, "printOpen", {});
   }
   Operation *getPrintClose(Operation *op) const {
-    return getPrint(op, "print_close", {});
+    return getPrint(op, "printClose", {});
   }
   Operation *getPrintComma(Operation *op) const {
-    return getPrint(op, "print_comma", {});
+    return getPrint(op, "printComma", {});
   }
   Operation *getPrintNewline(Operation *op) const {
-    return getPrint(op, "print_newline", {});
+    return getPrint(op, "printNewline", {});
   }
 };
 

diff --git a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
index 6efc48768a96..a6f242352ba2 100644
--- a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
@@ -23,15 +23,13 @@
 // By providing elementary printing methods only, this
 // library can remain fully unaware of low-level implementation
 // details of our vectors. Also useful for direct LLVM IR output.
-extern "C" void print_i32(int32_t i) { fprintf(stdout, "%" PRId32, i); }
-extern "C" void print_i64(int64_t l) { fprintf(stdout, "%" PRId64, l); }
-extern "C" void printU32(uint32_t i) { fprintf(stdout, "%" PRIu32, i); }
-extern "C" void printU64(uint64_t l) { fprintf(stdout, "%" PRIu64, l); }
-extern "C" void print_f32(float f) { fprintf(stdout, "%g", f); }
-extern "C" void print_f64(double d) { fprintf(stdout, "%lg", d); }
-extern "C" void print_open() { fputs("( ", stdout); }
-extern "C" void print_close() { fputs(" )", stdout); }
-extern "C" void print_comma() { fputs(", ", stdout); }
-extern "C" void print_newline() { fputc('\n', stdout); }
+extern "C" void printI64(int64_t i) { fprintf(stdout, "%" PRId64, i); }
+extern "C" void printU64(uint64_t u) { fprintf(stdout, "%" PRIu64, u); }
+extern "C" void printF32(float f) { fprintf(stdout, "%g", f); }
+extern "C" void printF64(double d) { fprintf(stdout, "%lg", d); }
+extern "C" void printOpen() { fputs("( ", stdout); }
+extern "C" void printClose() { fputs(" )", stdout); }
+extern "C" void printComma() { fputs(", ", stdout); }
+extern "C" void printNewline() { fputc('\n', stdout); }
 
 #endif
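For reference, hand-written LLVM-dialect IR (such as the integration tests above) now declares and calls the renamed entry points directly; this is just the pattern already used in the updated tests, with %val standing in for any i64 value:

  llvm.func @printI64(!llvm.i64)
  llvm.func @printNewline()

  llvm.call @printI64(%val) : (!llvm.i64) -> ()
  llvm.call @printNewline() : () -> ()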

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index d382c50f9132..eae918897d32 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -438,9 +438,9 @@ func @vector_print_scalar_i1(%arg0: i1) {
 //
 // CHECK-LABEL: llvm.func @vector_print_scalar_i1(
 // CHECK-SAME: %[[A:.*]]: !llvm.i1)
-//       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i1 to !llvm.i32
-//       CHECK: llvm.call @print_i32(%[[S]]) : (!llvm.i32) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i1 to !llvm.i64
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_i4(%arg0: i4) {
   vector.print %arg0 : i4
@@ -448,9 +448,9 @@ func @vector_print_scalar_i4(%arg0: i4) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i4(
 // CHECK-SAME: %[[A:.*]]: !llvm.i4)
-//       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i32
-//       CHECK: llvm.call @print_i32(%[[S]]) : (!llvm.i32) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_si4(%arg0: si4) {
   vector.print %arg0 : si4
@@ -458,9 +458,9 @@ func @vector_print_scalar_si4(%arg0: si4) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_si4(
 // CHECK-SAME: %[[A:.*]]: !llvm.i4)
-//       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i32
-//       CHECK: llvm.call @print_i32(%[[S]]) : (!llvm.i32) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i4 to !llvm.i64
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_ui4(%arg0: ui4) {
   vector.print %arg0 : ui4
@@ -468,9 +468,9 @@ func @vector_print_scalar_ui4(%arg0: ui4) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui4(
 // CHECK-SAME: %[[A:.*]]: !llvm.i4)
-//       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i4 to !llvm.i32
-//       CHECK: llvm.call @printU32(%[[S]]) : (!llvm.i32) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i4 to !llvm.i64
+//       CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_i32(%arg0: i32) {
   vector.print %arg0 : i32
@@ -478,8 +478,9 @@ func @vector_print_scalar_i32(%arg0: i32) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i32(
 // CHECK-SAME: %[[A:.*]]: !llvm.i32)
-//       CHECK:    llvm.call @print_i32(%[[A]]) : (!llvm.i32) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i32 to !llvm.i64
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_ui32(%arg0: ui32) {
   vector.print %arg0 : ui32
@@ -487,8 +488,8 @@ func @vector_print_scalar_ui32(%arg0: ui32) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui32(
 // CHECK-SAME: %[[A:.*]]: !llvm.i32)
-//       CHECK:    llvm.call @printU32(%[[A]]) : (!llvm.i32) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i32 to !llvm.i64
+//       CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
 
 func @vector_print_scalar_i40(%arg0: i40) {
   vector.print %arg0 : i40
@@ -497,8 +498,8 @@ func @vector_print_scalar_i40(%arg0: i40) {
 // CHECK-LABEL: llvm.func @vector_print_scalar_i40(
 // CHECK-SAME: %[[A:.*]]: !llvm.i40)
 //       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-//       CHECK: llvm.call @print_i64(%[[S]]) : (!llvm.i64) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_si40(%arg0: si40) {
   vector.print %arg0 : si40
@@ -507,8 +508,8 @@ func @vector_print_scalar_si40(%arg0: si40) {
 // CHECK-LABEL: llvm.func @vector_print_scalar_si40(
 // CHECK-SAME: %[[A:.*]]: !llvm.i40)
 //       CHECK: %[[S:.*]] = llvm.sext %[[A]] : !llvm.i40 to !llvm.i64
-//       CHECK: llvm.call @print_i64(%[[S]]) : (!llvm.i64) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: llvm.call @printI64(%[[S]]) : (!llvm.i64) -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_ui40(%arg0: ui40) {
   vector.print %arg0 : ui40
@@ -518,7 +519,7 @@ func @vector_print_scalar_ui40(%arg0: ui40) {
 // CHECK-SAME: %[[A:.*]]: !llvm.i40)
 //       CHECK: %[[S:.*]] = llvm.zext %[[A]] : !llvm.i40 to !llvm.i64
 //       CHECK: llvm.call @printU64(%[[S]]) : (!llvm.i64) -> ()
-//       CHECK: llvm.call @print_newline() : () -> ()
+//       CHECK: llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_i64(%arg0: i64) {
   vector.print %arg0 : i64
@@ -526,8 +527,8 @@ func @vector_print_scalar_i64(%arg0: i64) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_i64(
 // CHECK-SAME: %[[A:.*]]: !llvm.i64)
-//       CHECK:    llvm.call @print_i64(%[[A]]) : (!llvm.i64) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK:    llvm.call @printI64(%[[A]]) : (!llvm.i64) -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_ui64(%arg0: ui64) {
   vector.print %arg0 : ui64
@@ -536,7 +537,16 @@ func @vector_print_scalar_ui64(%arg0: ui64) {
 // CHECK-LABEL: llvm.func @vector_print_scalar_ui64(
 // CHECK-SAME: %[[A:.*]]: !llvm.i64)
 //       CHECK:    llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
+
+func @vector_print_scalar_index(%arg0: index) {
+  vector.print %arg0 : index
+  return
+}
+// CHECK-LABEL: llvm.func @vector_print_scalar_index(
+// CHECK-SAME: %[[A:.*]]: !llvm.i64)
+//       CHECK:    llvm.call @printU64(%[[A]]) : (!llvm.i64) -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_f32(%arg0: f32) {
   vector.print %arg0 : f32
@@ -544,8 +554,8 @@ func @vector_print_scalar_f32(%arg0: f32) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_f32(
 // CHECK-SAME: %[[A:.*]]: !llvm.float)
-//       CHECK:    llvm.call @print_f32(%[[A]]) : (!llvm.float) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK:    llvm.call @printF32(%[[A]]) : (!llvm.float) -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_f64(%arg0: f64) {
   vector.print %arg0 : f64
@@ -553,8 +563,8 @@ func @vector_print_scalar_f64(%arg0: f64) {
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_f64(
 // CHECK-SAME: %[[A:.*]]: !llvm.double)
-//       CHECK:    llvm.call @print_f64(%[[A]]) : (!llvm.double) -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK:    llvm.call @printF64(%[[A]]) : (!llvm.double) -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_vector(%arg0: vector<2x2xf32>) {
   vector.print %arg0 : vector<2x2xf32>
@@ -562,30 +572,30 @@ func @vector_print_vector(%arg0: vector<2x2xf32>) {
 }
 // CHECK-LABEL: llvm.func @vector_print_vector(
 // CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<2 x float>>)
-//       CHECK:    llvm.call @print_open() : () -> ()
+//       CHECK:    llvm.call @printOpen() : () -> ()
 //       CHECK:    %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x float>>
-//       CHECK:    llvm.call @print_open() : () -> ()
+//       CHECK:    llvm.call @printOpen() : () -> ()
 //       CHECK:    %[[x1:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
 //       CHECK:    %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : !llvm.i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @print_f32(%[[x2]]) : (!llvm.float) -> ()
-//       CHECK:    llvm.call @print_comma() : () -> ()
+//       CHECK:    llvm.call @printF32(%[[x2]]) : (!llvm.float) -> ()
+//       CHECK:    llvm.call @printComma() : () -> ()
 //       CHECK:    %[[x3:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
 //       CHECK:    %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : !llvm.i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @print_f32(%[[x4]]) : (!llvm.float) -> ()
-//       CHECK:    llvm.call @print_close() : () -> ()
-//       CHECK:    llvm.call @print_comma() : () -> ()
+//       CHECK:    llvm.call @printF32(%[[x4]]) : (!llvm.float) -> ()
+//       CHECK:    llvm.call @printClose() : () -> ()
+//       CHECK:    llvm.call @printComma() : () -> ()
 //       CHECK:    %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x float>>
-//       CHECK:    llvm.call @print_open() : () -> ()
+//       CHECK:    llvm.call @printOpen() : () -> ()
 //       CHECK:    %[[x6:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
 //       CHECK:    %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : !llvm.i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @print_f32(%[[x7]]) : (!llvm.float) -> ()
-//       CHECK:    llvm.call @print_comma() : () -> ()
+//       CHECK:    llvm.call @printF32(%[[x7]]) : (!llvm.float) -> ()
+//       CHECK:    llvm.call @printComma() : () -> ()
 //       CHECK:    %[[x8:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
 //       CHECK:    %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : !llvm.i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @print_f32(%[[x9]]) : (!llvm.float) -> ()
-//       CHECK:    llvm.call @print_close() : () -> ()
-//       CHECK:    llvm.call @print_close() : () -> ()
-//       CHECK:    llvm.call @print_newline() : () -> ()
+//       CHECK:    llvm.call @printF32(%[[x9]]) : (!llvm.float) -> ()
+//       CHECK:    llvm.call @printClose() : () -> ()
+//       CHECK:    llvm.call @printClose() : () -> ()
+//       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>

diff --git a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
index 5f5ff6f1fbb9..caa945d11b6c 100644
--- a/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
+++ b/mlir/test/mlir-cpu-runner/bare_ptr_call_conv.mlir
@@ -28,9 +28,9 @@ func @simple_add1_add2_test(%arg0: memref<2xf32>, %arg1: memref<2xf32>) {
 // External declarations.
 llvm.func @malloc(!llvm.i64) -> !llvm.ptr<i8>
 llvm.func @free(!llvm.ptr<i8>)
-func @print_f32(%arg0: f32)
-func @print_comma()
-func @print_newline()
+func @printF32(%arg0: f32)
+func @printComma()
+func @printNewline()
 
 // TODO: 'main' function currently has to be provided in LLVM dialect since
 // 'call' op is not yet supported by the bare pointer calling convention. The
@@ -54,18 +54,18 @@ func @print_newline()
 //  call @simple_add1_add2_test(%a, %b) : (memref<2xf32>, memref<2xf32>) -> ()
 //
 //  %l0 = load %a[%c0] : memref<2xf32>
-//  call @print_f32(%l0) : (f32) -> ()
-//  call @print_comma() : () -> ()
+//  call @printF32(%l0) : (f32) -> ()
+//  call @printComma() : () -> ()
 //  %l1 = load %a[%c1] : memref<2xf32>
-//  call @print_f32(%l1) : (f32) -> ()
-//  call @print_newline() : () -> ()
+//  call @printF32(%l1) : (f32) -> ()
+//  call @printNewline() : () -> ()
 //
 //  %l2 = load %b[%c0] : memref<2xf32>
-//  call @print_f32(%l2) : (f32) -> ()
-//  call @print_comma() : () -> ()
+//  call @printF32(%l2) : (f32) -> ()
+//  call @printComma() : () -> ()
 //  %l3 = load %b[%c1] : memref<2xf32>
-//  call @print_f32(%l3) : (f32) -> ()
-//  call @print_newline() : () -> ()
+//  call @printF32(%l3) : (f32) -> ()
+//  call @printNewline() : () -> ()
 //
 //  dealloc %a : memref<2xf32>
 //  dealloc %b : memref<2xf32>
@@ -144,8 +144,8 @@ llvm.func @main() {
   %60 = llvm.add %57, %59 : !llvm.i64
   %61 = llvm.getelementptr %56[%60] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
   %62 = llvm.load %61 : !llvm.ptr<float>
-  llvm.call @print_f32(%62) : (!llvm.float) -> ()
-  llvm.call @print_comma() : () -> ()
+  llvm.call @printF32(%62) : (!llvm.float) -> ()
+  llvm.call @printComma() : () -> ()
   %63 = llvm.extractvalue %20[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
   %64 = llvm.mlir.constant(0 : index) : !llvm.i64
   %65 = llvm.mlir.constant(1 : index) : !llvm.i64
@@ -153,8 +153,8 @@ llvm.func @main() {
   %67 = llvm.add %64, %66 : !llvm.i64
   %68 = llvm.getelementptr %63[%67] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
   %69 = llvm.load %68 : !llvm.ptr<float>
-  llvm.call @print_f32(%69) : (!llvm.float) -> ()
-  llvm.call @print_newline() : () -> ()
+  llvm.call @printF32(%69) : (!llvm.float) -> ()
+  llvm.call @printNewline() : () -> ()
   %70 = llvm.extractvalue %36[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
   %71 = llvm.mlir.constant(0 : index) : !llvm.i64
   %72 = llvm.mlir.constant(1 : index) : !llvm.i64
@@ -162,8 +162,8 @@ llvm.func @main() {
   %74 = llvm.add %71, %73 : !llvm.i64
   %75 = llvm.getelementptr %70[%74] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
   %76 = llvm.load %75 : !llvm.ptr<float>
-  llvm.call @print_f32(%76) : (!llvm.float) -> ()
-  llvm.call @print_comma() : () -> ()
+  llvm.call @printF32(%76) : (!llvm.float) -> ()
+  llvm.call @printComma() : () -> ()
   %77 = llvm.extractvalue %36[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
   %78 = llvm.mlir.constant(0 : index) : !llvm.i64
   %79 = llvm.mlir.constant(1 : index) : !llvm.i64
@@ -171,8 +171,8 @@ llvm.func @main() {
   %81 = llvm.add %78, %80 : !llvm.i64
   %82 = llvm.getelementptr %77[%81] : (!llvm.ptr<float>, !llvm.i64) -> !llvm.ptr<float>
   %83 = llvm.load %82 : !llvm.ptr<float>
-  llvm.call @print_f32(%83) : (!llvm.float) -> ()
-  llvm.call @print_newline() : () -> ()
+  llvm.call @printF32(%83) : (!llvm.float) -> ()
+  llvm.call @printNewline() : () -> ()
   %84 = llvm.extractvalue %20[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
   %85 = llvm.bitcast %84 : !llvm.ptr<float> to !llvm.ptr<i8>
   llvm.call @free(%85) : (!llvm.ptr<i8>) -> ()

diff --git a/mlir/test/mlir-cpu-runner/unranked_memref.mlir b/mlir/test/mlir-cpu-runner/unranked_memref.mlir
index 4e0c58f71340..c950bfd3547c 100644
--- a/mlir/test/mlir-cpu-runner/unranked_memref.mlir
+++ b/mlir/test/mlir-cpu-runner/unranked_memref.mlir
@@ -102,8 +102,8 @@ func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> {
   return %0 : memref<*xf32>
 }
 
-func @print_i64(index) -> ()
-func @print_newline() -> ()
+func @printU64(index) -> ()
+func @printNewline() -> ()
 
 func @dim_op_of_unranked() {
   %ranked = alloc() : memref<4x3xf32>
@@ -111,14 +111,14 @@ func @dim_op_of_unranked() {
 
   %c0 = constant 0 : index
   %dim_0 = dim %unranked, %c0 : memref<*xf32>
-  call @print_i64(%dim_0) : (index) -> ()
-  call @print_newline() : () -> ()
+  call @printU64(%dim_0) : (index) -> ()
+  call @printNewline() : () -> ()
   // CHECK: 4
 
   %c1 = constant 1 : index
   %dim_1 = dim %unranked, %c1 : memref<*xf32>
-  call @print_i64(%dim_1) : (index) -> ()
-  call @print_newline() : () -> ()
+  call @printU64(%dim_1) : (index) -> ()
+  call @printNewline() : () -> ()
   // CHECK: 3
 
   return


        

