[Mlir-commits] [mlir] 14d99ea - [MLIR] Convert remaining tests to opaque pointers (NFC)
Nikita Popov
llvmlistbot@llvm.org
Wed Jan 25 04:14:04 PST 2023
Author: Nikita Popov
Date: 2023-01-25T13:13:55+01:00
New Revision: 14d99ea16c4433a6b6dbaf6a7e3b9f14984a9417
URL: https://github.com/llvm/llvm-project/commit/14d99ea16c4433a6b6dbaf6a7e3b9f14984a9417
DIFF: https://github.com/llvm/llvm-project/commit/14d99ea16c4433a6b6dbaf6a7e3b9f14984a9417.diff
LOG: [MLIR] Convert remaining tests to opaque pointers (NFC)
These were the final tests using -opaque-pointers=0 in mlir/.
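For context, opaque pointers drop the pointee type from LLVM pointer types, so the importer now produces the bare !llvm.ptr type instead of parameterized types like !llvm.ptr<i64>. A minimal before/after sketch, adapted from the basic.ll hunk below:

; With -opaque-pointers=0 (typed pointers):
%aa = inttoptr i64 %a to i64*   ; imported as: llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
; With opaque pointers (the default):
%aa = inttoptr i64 %a to ptr    ; imported as: llvm.inttoptr %arg0 : i64 to !llvm.ptr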
Added:
Modified:
mlir/test/Target/LLVMIR/Import/basic.ll
mlir/test/Target/LLVMIR/Import/constant.ll
mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
mlir/test/Target/LLVMIR/Import/incorrect-constexpr-inst-caching.ll
mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll
mlir/test/Target/LLVMIR/Import/intrinsic.ll
mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
Removed:
################################################################################
diff --git a/mlir/test/Target/LLVMIR/Import/basic.ll b/mlir/test/Target/LLVMIR/Import/basic.ll
index 050197c90c838..3b9b1c2909c16 100644
--- a/mlir/test/Target/LLVMIR/Import/basic.ll
+++ b/mlir/test/Target/LLVMIR/Import/basic.ll
@@ -1,5 +1,5 @@
-; RUN: mlir-translate -opaque-pointers=0 -import-llvm %s | FileCheck %s
-; RUN: mlir-translate -opaque-pointers=0 -import-llvm -mlir-print-debuginfo %s | FileCheck %s --check-prefix=CHECK-DBG
+; RUN: mlir-translate -import-llvm %s | FileCheck %s
+; RUN: mlir-translate -import-llvm -mlir-print-debuginfo %s | FileCheck %s --check-prefix=CHECK-DBG
; CHECK-DBG: #[[MODULELOC:.+]] = loc({{.*}}basic.ll{{.*}}:0:0)
@@ -16,16 +16,16 @@ declare float @fe(i32)
; CHECK: %[[c42:[0-9]+]] = llvm.mlir.constant(42 : i32) : i32
define internal dso_local i32 @f1(i64 %a) norecurse {
entry:
-; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
- %aa = inttoptr i64 %a to i64*
-; CHECK-DBG: llvm.mlir.addressof @global : !llvm.ptr<f64> loc(#[[MODULELOC]])
-; %[[addrof:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr<f64>
-; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr<f64>
-; %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
-; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr<f64> to i64
-; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr<f64>, i32) -> !llvm.ptr<f64>
- %bb = ptrtoint double* @global to i64
- %cc = getelementptr double, double* @global, i32 3
+; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr
+ %aa = inttoptr i64 %a to ptr
+; CHECK-DBG: llvm.mlir.addressof @global : !llvm.ptr loc(#[[MODULELOC]])
+; %[[addrof:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr
+; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr
+; %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr
+; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr to i64
+; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr, i32) -> !llvm.ptr
+ %bb = ptrtoint ptr @global to i64
+ %cc = getelementptr double, ptr @global, i32 3
; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : i64 to i32
; CHECK-DBG: llvm.trunc %arg0 : i64 to i32 loc(#[[MODULELOC]])
%b = trunc i64 %a to i32
@@ -54,35 +54,35 @@ if.end:
; CHECK-DBG: } loc(#[[MODULELOC]])
-@_ZTIi = external dso_local constant i8*
-@_ZTIii= external dso_local constant i8**
-declare void @foo(i8*)
-declare i8* @bar(i8*)
+@_ZTIi = external dso_local constant ptr
+@_ZTIii= external dso_local constant ptr
+declare void @foo(ptr)
+declare ptr @bar(ptr)
declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: @invokeLandingpad
-define i32 @invokeLandingpad() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
- ; CHECK: %[[a1:[0-9]+]] = llvm.bitcast %{{[0-9]+}} : !llvm.ptr<ptr<ptr<i8>>> to !llvm.ptr<i8>
- ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 {alignment = 1 : i64} : (i32) -> !llvm.ptr<i8>
+define i32 @invokeLandingpad() personality ptr @__gxx_personality_v0 {
+ ; CHECK: %[[a1:[0-9]+]] = llvm.mlir.addressof @_ZTIii : !llvm.ptr
+ ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 {alignment = 1 : i64} : (i32) -> !llvm.ptr
%1 = alloca i8
- ; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> ()
- invoke void @foo(i8* %1) to label %4 unwind label %2
+ ; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> ()
+ invoke void @foo(ptr %1) to label %4 unwind label %2
; CHECK: ^bb1:
- ; CHECK: %{{[0-9]+}} = llvm.landingpad (catch %{{[0-9]+}} : !llvm.ptr<ptr<i8>>) (catch %[[a1]] : !llvm.ptr<i8>) (filter %{{[0-9]+}} : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
- %3 = landingpad { i8*, i32 } catch i8** @_ZTIi catch i8* bitcast (i8*** @_ZTIii to i8*)
+ ; CHECK: %{{[0-9]+}} = llvm.landingpad (catch %{{[0-9]+}} : !llvm.ptr) (catch %[[a1]] : !llvm.ptr) (filter %{{[0-9]+}} : !llvm.array<1 x i8>) : !llvm.struct<(ptr, i32)>
+ %3 = landingpad { ptr, i32 } catch ptr @_ZTIi catch ptr @_ZTIii
; FIXME: Change filter to a constant array once they are handled.
; Currently, even though it parses this, the LLVM module is broken
filter [1 x i8] [i8 1]
- resume { i8*, i32 } %3
+ resume { ptr, i32 } %3
; CHECK: ^bb2:
; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 1
; CHECK: ^bb3:
- ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
- %6 = invoke i8* @bar(i8* %1) to label %4 unwind label %2
+ ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> !llvm.ptr
+ %6 = invoke ptr @bar(ptr %1) to label %4 unwind label %2
; CHECK: ^bb4:
; CHECK: llvm.return %{{[0-9]+}} : i32
@@ -107,32 +107,28 @@ define i32 @useFreezeOp(i32 %x) {
}
; Variadic function definition
-%struct.va_list = type { i8* }
+%struct.va_list = type { ptr }
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
; CHECK-LABEL: llvm.func @variadic_function
define void @variadic_function(i32 %X, ...) {
- ; CHECK: %[[ALLOCA0:.+]] = llvm.alloca %{{.*}} x !llvm.struct<"struct.va_list", (ptr<i8>)> {{.*}} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
+ ; CHECK: %[[ALLOCA0:.+]] = llvm.alloca %{{.*}} x !llvm.struct<"struct.va_list", (ptr)> {{.*}} : (i32) -> !llvm.ptr
%ap = alloca %struct.va_list
- ; CHECK: %[[CAST0:.+]] = llvm.bitcast %[[ALLOCA0]] : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
- %ap2 = bitcast %struct.va_list* %ap to i8*
- ; CHECK: llvm.intr.vastart %[[CAST0]]
- call void @llvm.va_start(i8* %ap2)
-
- ; CHECK: %[[ALLOCA1:.+]] = llvm.alloca %{{.*}} x !llvm.ptr<i8> {{.*}} : (i32) -> !llvm.ptr<ptr<i8>>
- %aq = alloca i8*
- ; CHECK: %[[CAST1:.+]] = llvm.bitcast %[[ALLOCA1]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
- %aq2 = bitcast i8** %aq to i8*
- ; CHECK: llvm.intr.vacopy %[[CAST0]] to %[[CAST1]]
- call void @llvm.va_copy(i8* %aq2, i8* %ap2)
- ; CHECK: llvm.intr.vaend %[[CAST1]]
- call void @llvm.va_end(i8* %aq2)
-
- ; CHECK: llvm.intr.vaend %[[CAST0]]
- call void @llvm.va_end(i8* %ap2)
+ ; CHECK: llvm.intr.vastart %[[ALLOCA0]]
+ call void @llvm.va_start(ptr %ap)
+
+ ; CHECK: %[[ALLOCA1:.+]] = llvm.alloca %{{.*}} x !llvm.ptr {{.*}} : (i32) -> !llvm.ptr
+ %aq = alloca ptr
+ ; CHECK: llvm.intr.vacopy %[[ALLOCA0]] to %[[ALLOCA1]]
+ call void @llvm.va_copy(ptr %aq, ptr %ap)
+ ; CHECK: llvm.intr.vaend %[[ALLOCA1]]
+ call void @llvm.va_end(ptr %aq)
+
+ ; CHECK: llvm.intr.vaend %[[ALLOCA0]]
+ call void @llvm.va_end(ptr %ap)
; CHECK: llvm.return
ret void
}
diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll
index 8ff836d066c44..12175fbc9cb60 100644
--- a/mlir/test/Target/LLVMIR/Import/constant.ll
+++ b/mlir/test/Target/LLVMIR/Import/constant.ll
@@ -1,4 +1,4 @@
-; RUN: mlir-translate -opaque-pointers=0 -import-llvm -split-input-file %s | FileCheck %s
+; RUN: mlir-translate -import-llvm -split-input-file %s | FileCheck %s
; CHECK-LABEL: @int_constants
define void @int_constants(i16 %arg0, i32 %arg1, i1 %arg2) {
@@ -48,22 +48,10 @@ define void @undef_constant(i32 %arg0) {
; // -----
; CHECK-LABEL: @null_constant
-define i32* @null_constant() {
- ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr<i32>
- ; CHECK: llvm.return %[[NULL]] : !llvm.ptr<i32>
- ret i32* bitcast (double* null to i32*)
-}
-
-; // -----
-
-@global = external global double, align 8
-
-; CHECK-LABEL: @bitcast_const_expr
-define i32* @bitcast_const_expr() {
- ; CHECK: %[[VAL0:.*]] = llvm.mlir.addressof @global : !llvm.ptr<f64>
- ; CHECK: %[[VAL1:.*]] = llvm.bitcast %[[VAL0]] : !llvm.ptr<f64> to !llvm.ptr<i32>
- ; CHECK: llvm.return %[[VAL1]] : !llvm.ptr<i32>
- ret i32* bitcast (double* @global to i32*)
+define ptr @null_constant() {
+ ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr
+ ; CHECK: llvm.return %[[NULL]] : !llvm.ptr
+ ret ptr null
}
; // -----
@@ -71,12 +59,12 @@ define i32* @bitcast_const_expr() {
@global = external global i32, align 8
; CHECK-LABEL: @gep_const_expr
-define i32* @gep_const_expr() {
- ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr<i32>
+define ptr @gep_const_expr() {
+ ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr
; CHECK: %[[IDX:[0-9]+]] = llvm.mlir.constant(2 : i32) : i32
- ; CHECK: %[[GEP:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX]]] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- ; CHECK: llvm.return %[[GEP]] : !llvm.ptr<i32>
- ret i32* getelementptr (i32, i32* @global, i32 2)
+ ; CHECK: %[[GEP:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX]]] : (!llvm.ptr, i32) -> !llvm.ptr
+ ; CHECK: llvm.return %[[GEP]] : !llvm.ptr
+ ret ptr getelementptr (i32, ptr @global, i32 2)
}
; // -----
@@ -85,16 +73,16 @@ define i32* @gep_const_expr() {
; CHECK-LABEL: @const_expr_with_duplicate
define i64 @const_expr_with_duplicate() {
- ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr<i32>
+ ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr
; CHECK: %[[IDX:[0-9]+]] = llvm.mlir.constant(7 : i32) : i32
- ; CHECK: %[[GEP:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX]]] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- ; CHECK: %[[DUP:[0-9]+]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i32> to i64
+ ; CHECK: %[[GEP:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX]]] : (!llvm.ptr, i32) -> !llvm.ptr
+ ; CHECK: %[[DUP:[0-9]+]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
; Verify the duplicate sub expression is converted only once.
; CHECK: %[[SUM:[0-9]+]] = llvm.add %[[DUP]], %[[DUP]] : i64
; CHECK: llvm.return %[[SUM]] : i64
- ret i64 add (i64 ptrtoint (i32* getelementptr (i32, i32* @global, i32 7) to i64),
- i64 ptrtoint (i32* getelementptr (i32, i32* @global, i32 7) to i64))
+ ret i64 add (i64 ptrtoint (ptr getelementptr (i32, ptr @global, i32 7) to i64),
+ i64 ptrtoint (ptr getelementptr (i32, ptr @global, i32 7) to i64))
}
; // -----
@@ -105,10 +93,10 @@ define i64 @const_expr_with_duplicate() {
define i64 @const_expr_with_aggregate() {
; Compute the vector elements.
; CHECK: %[[VAL1:[0-9]+]] = llvm.mlir.constant(33 : i64) : i64
- ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr<i32>
+ ; CHECK: %[[ADDR:[0-9]+]] = llvm.mlir.addressof @global : !llvm.ptr
; CHECK: %[[IDX1:[0-9]+]] = llvm.mlir.constant(7 : i32) : i32
- ; CHECK: %[[GEP1:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX1]]] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- ; CHECK: %[[VAL2:[0-9]+]] = llvm.ptrtoint %[[GEP1]] : !llvm.ptr<i32> to i64
+ ; CHECK: %[[GEP1:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX1]]] : (!llvm.ptr, i32) -> !llvm.ptr
+ ; CHECK: %[[VAL2:[0-9]+]] = llvm.ptrtoint %[[GEP1]] : !llvm.ptr to i64
; Fill the vector.
; CHECK: %[[VEC1:[0-9]+]] = llvm.mlir.undef : vector<2xi64>
@@ -119,15 +107,15 @@ define i64 @const_expr_with_aggregate() {
; CHECK: %[[IDX4:[0-9]+]] = llvm.mlir.constant(42 : i32) : i32
; Compute the extract index.
- ; CHECK: %[[GEP2:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX4]]] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- ; CHECK: %[[IDX5:[0-9]+]] = llvm.ptrtoint %[[GEP2]] : !llvm.ptr<i32> to i64
+ ; CHECK: %[[GEP2:[0-9]+]] = llvm.getelementptr %[[ADDR]][%[[IDX4]]] : (!llvm.ptr, i32) -> !llvm.ptr
+ ; CHECK: %[[IDX5:[0-9]+]] = llvm.ptrtoint %[[GEP2]] : !llvm.ptr to i64
; Extract the vector element.
; CHECK: %[[ELEM:[0-9]+]] = llvm.extractelement %[[VEC3]][%[[IDX5]] : i64] : vector<2xi64>
; CHECK: llvm.return %[[ELEM]] : i64
ret i64 extractelement (
- <2 x i64> <i64 33, i64 ptrtoint (i32* getelementptr (i32, i32* @global, i32 7) to i64)>,
- i64 ptrtoint (i32* getelementptr (i32, i32* @global, i32 42) to i64))
+ <2 x i64> <i64 33, i64 ptrtoint (ptr getelementptr (i32, ptr @global, i32 7) to i64)>,
+ i64 ptrtoint (ptr getelementptr (i32, ptr @global, i32 42) to i64))
}
; // -----
@@ -137,12 +125,12 @@ define i64 @const_expr_with_aggregate() {
; Calling a function that has not been defined yet.
; CHECK-LABEL: @function_address_before_def
define i32 @function_address_before_def() {
- %1 = alloca i32 ()*
- ; CHECK: %[[FUN:.*]] = llvm.mlir.addressof @callee : !llvm.ptr<func<i32 ()>>
- ; CHECK: llvm.store %[[FUN]], %[[PTR:.*]]
- store i32 ()* @callee, i32 ()** %1
- ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]]
- %2 = load i32 ()*, i32 ()** %1
+ %1 = alloca ptr
+ ; CHECK: %[[FUN:.*]] = llvm.mlir.addressof @callee : !llvm.ptr
+ ; CHECK: llvm.store %[[FUN]], %[[PTR:.*]] : !llvm.ptr, !llvm.ptr
+ store ptr @callee, ptr %1
+ ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]] : !llvm.ptr -> !llvm.ptr
+ %2 = load ptr, ptr %1
; CHECK: llvm.call %[[INDIR]]()
%3 = call i32 %2()
ret i32 %3
@@ -155,12 +143,12 @@ define i32 @callee() {
; Calling a function that has been defined.
; CHECK-LABEL: @function_address_after_def
define i32 @function_address_after_def() {
- %1 = alloca i32 ()*
- ; CHECK: %[[FUN:.*]] = llvm.mlir.addressof @callee : !llvm.ptr<func<i32 ()>>
- ; CHECK: llvm.store %[[FUN]], %[[PTR:.*]]
- store i32 ()* @callee, i32 ()** %1
- ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]]
- %2 = load i32 ()*, i32 ()** %1
+ %1 = alloca ptr
+ ; CHECK: %[[FUN:.*]] = llvm.mlir.addressof @callee : !llvm.ptr
+ ; CHECK: llvm.store %[[FUN]], %[[PTR:.*]] : !llvm.ptr, !llvm.ptr
+ store ptr @callee, ptr %1
+ ; CHECK: %[[INDIR:.*]] = llvm.load %[[PTR]] : !llvm.ptr -> !llvm.ptr
+ %2 = load ptr, ptr %1
; CHECK: llvm.call %[[INDIR]]()
%3 = call i32 %2()
ret i32 %3
@@ -192,22 +180,22 @@ define i32 @function_address_after_def() {
; CHECK: %[[CHAIN1:.+]] = llvm.insertvalue %[[C2]], %[[CHAIN0]][1]
; CHECK: %[[CHAIN2:.+]] = llvm.insertvalue %[[C3]], %[[CHAIN1]][2]
; CHECK: %[[CHAIN3:.+]] = llvm.insertvalue %[[C4]], %[[CHAIN2]][3]
-; CHECK: %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>
-; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"nested_agg_type", (struct<"simple_agg_type", (i32, i8, i16, i32)>, ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>)>
+; CHECK: %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"nested_agg_type", (struct<"simple_agg_type", (i32, i8, i16, i32)>, ptr)>
; CHECK: %[[CHAIN4:.+]] = llvm.insertvalue %[[CHAIN3]], %[[ROOT]][0]
; CHECK: %[[CHAIN5:.+]] = llvm.insertvalue %[[NULL]], %[[CHAIN4]][1]
; CHECK: llvm.return %[[CHAIN5]]
-%nested_agg_type = type {%simple_agg_type, %simple_agg_type*}
-@nested_agg = global %nested_agg_type { %simple_agg_type{i32 1, i8 2, i16 3, i32 4}, %simple_agg_type* null }
+%nested_agg_type = type {%simple_agg_type, ptr}
+@nested_agg = global %nested_agg_type { %simple_agg_type{i32 1, i8 2, i16 3, i32 4}, ptr null }
-; CHECK: %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>
-; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.vec<2 x ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>>
+; CHECK: %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.vec<2 x ptr>
; CHECK: %[[P0:.+]] = llvm.mlir.constant(0 : i32) : i32
-; CHECK: %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : !llvm.vec<2 x ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>>
+; CHECK: %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : !llvm.vec<2 x ptr>
; CHECK: %[[P1:.+]] = llvm.mlir.constant(1 : i32) : i32
-; CHECK: %[[CHAIN1:.+]] = llvm.insertelement %[[NULL]], %[[CHAIN0]][%[[P1]] : i32] : !llvm.vec<2 x ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>>
-; CHECK: llvm.return %[[CHAIN1]] : !llvm.vec<2 x ptr<struct<"simple_agg_type", (i32, i8, i16, i32)>>>
-@vector_agg = global <2 x %simple_agg_type*> <%simple_agg_type* null, %simple_agg_type* null>
+; CHECK: %[[CHAIN1:.+]] = llvm.insertelement %[[NULL]], %[[CHAIN0]][%[[P1]] : i32] : !llvm.vec<2 x ptr>
+; CHECK: llvm.return %[[CHAIN1]] : !llvm.vec<2 x ptr>
+@vector_agg = global <2 x ptr> <ptr null, ptr null>
; // -----
@@ -217,12 +205,12 @@ define i32 @function_address_after_def() {
; CHECK-LABEL: @const_exprs_with_duplicate
define i64 @const_exprs_with_duplicate() {
- ; CHECK: %[[ADDR:.+]] = llvm.mlir.addressof @global : !llvm.ptr<i32>
- ; CHECK: llvm.getelementptr %[[ADDR]][%{{.*}}] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- %1 = add i64 1, ptrtoint (i32* getelementptr (i32, i32* @global, i32 7) to i64)
+ ; CHECK: %[[ADDR:.+]] = llvm.mlir.addressof @global : !llvm.ptr
+ ; CHECK: llvm.getelementptr %[[ADDR]][%{{.*}}] : (!llvm.ptr, i32) -> !llvm.ptr
+ %1 = add i64 1, ptrtoint (ptr getelementptr (i32, ptr @global, i32 7) to i64)
; Verify the address value is reused.
- ; CHECK: llvm.getelementptr %[[ADDR]][%{{.*}}] : (!llvm.ptr<i32>, i32) -> !llvm.ptr<i32>
- %2 = add i64 %1, ptrtoint (i32* getelementptr (i32, i32* @global, i32 42) to i64)
+ ; CHECK: llvm.getelementptr %[[ADDR]][%{{.*}}] : (!llvm.ptr, i32) -> !llvm.ptr
+ %2 = add i64 %1, ptrtoint (ptr getelementptr (i32, ptr @global, i32 42) to i64)
ret i64 %2
}
diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll b/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
index e430283ed36ba..1953ecd683721 100644
--- a/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
+++ b/mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll
@@ -1,4 +1,4 @@
-; RUN: mlir-translate -opaque-pointers=0 --import-llvm %s | FileCheck %s
+; RUN: mlir-translate --import-llvm %s | FileCheck %s
; Testing the fix for issue where llvm.getelementptr translated from the second
; ConstantExpr-GEP tried to use llvm.constant(0: i32)-s that were below itself,
@@ -7,24 +7,24 @@
; This test is primarily used to make sure verification passes. Thus, we
; only wrote a minimum level of checks.
-%my_struct = type {i32, i8*}
+%my_struct = type {i32, ptr}
; CHECK: llvm.mlir.constant(8 : i32) : i32
-; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr<array<5 x i8>>
+; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr
; CHECK: llvm.mlir.constant(0 : i32) : i32
; CHECK: llvm.getelementptr
-; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr<i8>)>
+; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr)>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
; CHECK: llvm.mlir.constant(7 : i32) : i32
-; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr<array<5 x i8>>
+; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr
; CHECK: llvm.getelementptr
-; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr<i8>)>
+; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr)>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
-; CHECK: llvm.mlir.undef : !llvm.array<2 x struct<"my_struct", (i32, ptr<i8>)>>
+; CHECK: llvm.mlir.undef : !llvm.array<2 x struct<"my_struct", (i32, ptr)>>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
; CHECK: llvm.return
@str0 = private unnamed_addr constant [5 x i8] c"aaaa\00"
@str1 = private unnamed_addr constant [5 x i8] c"bbbb\00"
-@g = global [2 x %my_struct] [%my_struct {i32 8, i8* getelementptr ([5 x i8], [5 x i8]* @str0, i32 0, i32 0)}, %my_struct {i32 7, i8* getelementptr ([5 x i8], [5 x i8]* @str1, i32 0, i32 0)}]
+@g = global [2 x %my_struct] [%my_struct {i32 8, ptr getelementptr ([5 x i8], ptr @str0, i32 0, i32 1)}, %my_struct {i32 7, ptr getelementptr ([5 x i8], ptr @str1, i32 0, i32 1)}]
diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-constexpr-inst-caching.ll b/mlir/test/Target/LLVMIR/Import/incorrect-constexpr-inst-caching.ll
index 43f6377c95551..15957050e079a 100644
--- a/mlir/test/Target/LLVMIR/Import/incorrect-constexpr-inst-caching.ll
+++ b/mlir/test/Target/LLVMIR/Import/incorrect-constexpr-inst-caching.ll
@@ -1,30 +1,30 @@
-; RUN: mlir-translate -opaque-pointers=0 --import-llvm %s | FileCheck %s
+; RUN: mlir-translate --import-llvm %s | FileCheck %s
; REQUIRES: asserts
; This test is primarily used to make sure an assertion is not triggered.
; Thus, we only wrote a minimum level of checks.
-%my_struct = type {i32, i8*}
+%my_struct = type {i32, ptr}
; CHECK: llvm.mlir.constant(8 : i32) : i32
-; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr<array<5 x i8>>
+; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr
; CHECK: llvm.mlir.constant(0 : i32) : i32
; CHECK: llvm.mlir.constant(1 : i32) : i32
; CHECK: llvm.getelementptr
-; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr<i8>)>
+; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr)>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
; CHECK: llvm.mlir.constant(7 : i32) : i32
-; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr<array<5 x i8>>
+; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr
; CHECK: llvm.mlir.constant(2 : i32) : i32
; CHECK: llvm.mlir.constant(3 : i32) : i32
; CHECK: llvm.getelementptr
-; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr<i8>)>
+; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr)>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
-; CHECK: llvm.mlir.undef : !llvm.array<2 x struct<"my_struct", (i32, ptr<i8>)>>
+; CHECK: llvm.mlir.undef : !llvm.array<2 x struct<"my_struct", (i32, ptr)>>
; CHECK: llvm.insertvalue
; CHECK: llvm.insertvalue
; CHECK: llvm.return
@str0 = private unnamed_addr constant [5 x i8] c"aaaa\00"
@str1 = private unnamed_addr constant [5 x i8] c"bbbb\00"
-@g = global [2 x %my_struct] [%my_struct {i32 8, i8* getelementptr ([5 x i8], [5 x i8]* @str0, i32 0, i32 1)}, %my_struct {i32 7, i8* getelementptr ([5 x i8], [5 x i8]* @str1, i32 2, i32 3)}]
+@g = global [2 x %my_struct] [%my_struct {i32 8, ptr getelementptr ([5 x i8], ptr @str0, i32 0, i32 1)}, %my_struct {i32 7, ptr getelementptr ([5 x i8], ptr @str1, i32 2, i32 3)}]
diff --git a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll
index 7de99158cc022..6bf7572e7e791 100644
--- a/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll
+++ b/mlir/test/Target/LLVMIR/Import/incorrect-scalable-vector-check.ll
@@ -1,8 +1,8 @@
-; RUN: mlir-translate -opaque-pointers=0 --import-llvm %s | FileCheck %s
+; RUN: mlir-translate --import-llvm %s | FileCheck %s
; CHECK: llvm.func @shufflevector_crash
-define void @shufflevector_crash(<2 x i32*> %arg0) {
- ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr<i32>>
- %1 = shufflevector <2 x i32*> %arg0, <2 x i32*> undef, <2 x i32> <i32 1, i32 0>
+define void @shufflevector_crash(<2 x ptr> %arg0) {
+ ; CHECK: llvm.shufflevector %{{.+}}, %{{.+}} [1, 0] : !llvm.vec<2 x ptr>
+ %1 = shufflevector <2 x ptr> %arg0, <2 x ptr> undef, <2 x i32> <i32 1, i32 0>
ret void
}
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 7f225a3e2b442..1bcafb7d5c23a 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -1,7 +1,7 @@
-; RUN: mlir-translate -opaque-pointers=0 -import-llvm %s | FileCheck %s
+; RUN: mlir-translate -import-llvm %s | FileCheck %s
; CHECK-LABEL: llvm.func @fmuladd_test
-define void @fmuladd_test(float %0, float %1, <8 x float> %2, i8* %3) {
+define void @fmuladd_test(float %0, float %1, <8 x float> %2, ptr %3) {
; CHECK: llvm.intr.fmuladd(%{{.*}}, %{{.*}}, %{{.*}}) : (f32, f32, f32) -> f32
%5 = call float @llvm.fmuladd.f32(float %0, float %1, float %0)
; CHECK: llvm.intr.fmuladd(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
@@ -10,8 +10,8 @@ define void @fmuladd_test(float %0, float %1, <8 x float> %2, i8* %3) {
%7 = call float @llvm.fma.f32(float %0, float %1, float %0)
; CHECK: llvm.intr.fma(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
%8 = call <8 x float> @llvm.fma.v8f32(<8 x float> %2, <8 x float> %2, <8 x float> %2)
- ; CHECK: "llvm.intr.prefetch"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
- call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1)
+ ; CHECK: "llvm.intr.prefetch"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, i32, i32, i32) -> ()
+ call void @llvm.prefetch.p0(ptr %3, i32 0, i32 3, i32 1)
ret void
}
@@ -265,7 +265,7 @@ define void @vector_reductions(float %0, <8 x float> %1, <8 x i32> %2) {
; CHECK-SAME: %[[VEC2:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[PTR:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[STRIDE:[a-zA-Z0-9]+]]
-define void @matrix_intrinsics(<64 x float> %vec1, <48 x float> %vec2, float* %ptr, i64 %stride) {
+define void @matrix_intrinsics(<64 x float> %vec1, <48 x float> %vec2, ptr %ptr, i64 %stride) {
; CHECK: llvm.intr.matrix.multiply %[[VEC1]], %[[VEC2]]
; CHECK-SAME: {lhs_columns = 16 : i32, lhs_rows = 4 : i32, rhs_columns = 3 : i32}
%1 = call <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float> %vec1, <48 x float> %vec2, i32 4, i32 16, i32 3)
@@ -274,10 +274,10 @@ define void @matrix_intrinsics(<64 x float> %vec1, <48 x float> %vec2, float* %p
%2 = call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %vec2, i32 3, i32 16)
; CHECK: %[[VAL1:.+]] = llvm.intr.matrix.column.major.load %[[PTR]], <stride = %[[STRIDE]]>
; CHECK-SAME: {columns = 16 : i32, isVolatile = false, rows = 3 : i32}
- %3 = call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* align 4 %ptr, i64 %stride, i1 false, i32 3, i32 16)
+ %3 = call <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr align 4 %ptr, i64 %stride, i1 false, i32 3, i32 16)
; CHECK: llvm.intr.matrix.column.major.store %[[VAL1]], %[[PTR]], <stride = %[[STRIDE]]>
; CHECK-SAME: {columns = 16 : i32, isVolatile = true, rows = 3 : i32}
- call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %3, float* align 4 %ptr, i64 %stride, i1 true, i32 3, i32 16)
+ call void @llvm.matrix.column.major.store.v48f32.i64(<48 x float> %3, ptr align 4 %ptr, i64 %stride, i1 true, i32 3, i32 16)
ret void
}
@@ -291,68 +291,68 @@ define <7 x i1> @get_active_lane_mask(i64 %0, i64 %1) {
; CHECK-LABEL: @masked_load_store_intrinsics
; CHECK-SAME: %[[VEC:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[MASK:[a-zA-Z0-9]+]]
-define void @masked_load_store_intrinsics(<7 x float>* %vec, <7 x i1> %mask) {
+define void @masked_load_store_intrinsics(ptr %vec, <7 x i1> %mask) {
; CHECK: %[[UNDEF:.+]] = llvm.mlir.undef
; CHECK: %[[VAL1:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[UNDEF]] {alignment = 1 : i32}
- ; CHECK-SAME: (!llvm.ptr<vector<7xf32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
- %1 = call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
+ ; CHECK-SAME: (!llvm.ptr, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
+ %1 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
; CHECK: %[[VAL2:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32}
- %2 = call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
+ %2 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
; CHECK: llvm.intr.masked.store %[[VAL2]], %[[VEC]], %[[MASK]] {alignment = 8 : i32}
- ; CHECK-SAME: vector<7xf32>, vector<7xi1> into !llvm.ptr<vector<7xf32>>
- call void @llvm.masked.store.v7f32.p0v7f32(<7 x float> %2, <7 x float>* %vec, i32 8, <7 x i1> %mask)
+ ; CHECK-SAME: vector<7xf32>, vector<7xi1> into !llvm.ptr
+ call void @llvm.masked.store.v7f32.p0(<7 x float> %2, ptr %vec, i32 8, <7 x i1> %mask)
ret void
}
; CHECK-LABEL: @masked_gather_scatter_intrinsics
; CHECK-SAME: %[[VEC:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[MASK:[a-zA-Z0-9]+]]
-define void @masked_gather_scatter_intrinsics(<7 x float*> %vec, <7 x i1> %mask) {
+define void @masked_gather_scatter_intrinsics(<7 x ptr> %vec, <7 x i1> %mask) {
; CHECK: %[[UNDEF:.+]] = llvm.mlir.undef
; CHECK: %[[VAL1:.+]] = llvm.intr.masked.gather %[[VEC]], %[[MASK]], %[[UNDEF]] {alignment = 1 : i32}
- ; CHECK-SAME: (!llvm.vec<7 x ptr<f32>>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
- %1 = call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
+ ; CHECK-SAME: (!llvm.vec<7 x ptr>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
+ %1 = call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
; CHECK: %[[VAL2:.+]] = llvm.intr.masked.gather %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32}
- %2 = call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
+ %2 = call <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr> %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
; CHECK: llvm.intr.masked.scatter %[[VAL2]], %[[VEC]], %[[MASK]] {alignment = 8 : i32}
- ; CHECK-SAME: vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr<f32>>
- call void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float> %2, <7 x float*> %vec, i32 8, <7 x i1> %mask)
+ ; CHECK-SAME: vector<7xf32>, vector<7xi1> into !llvm.vec<7 x ptr>
+ call void @llvm.masked.scatter.v7f32.v7p0(<7 x float> %2, <7 x ptr> %vec, i32 8, <7 x i1> %mask)
ret void
}
; CHECK-LABEL: llvm.func @masked_expand_compress_intrinsics
-define void @masked_expand_compress_intrinsics(float* %0, <7 x i1> %1, <7 x float> %2) {
- ; CHECK: %[[val1:.+]] = "llvm.intr.masked.expandload"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<f32>, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
- %4 = call <7 x float> @llvm.masked.expandload.v7f32(float* %0, <7 x i1> %1, <7 x float> %2)
- ; CHECK: "llvm.intr.masked.compressstore"(%[[val1]], %{{.*}}, %{{.*}}) : (vector<7xf32>, !llvm.ptr<f32>, vector<7xi1>) -> ()
- call void @llvm.masked.compressstore.v7f32(<7 x float> %4, float* %0, <7 x i1> %1)
+define void @masked_expand_compress_intrinsics(ptr %0, <7 x i1> %1, <7 x float> %2) {
+ ; CHECK: %[[val1:.+]] = "llvm.intr.masked.expandload"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
+ %4 = call <7 x float> @llvm.masked.expandload.v7f32(ptr %0, <7 x i1> %1, <7 x float> %2)
+ ; CHECK: "llvm.intr.masked.compressstore"(%[[val1]], %{{.*}}, %{{.*}}) : (vector<7xf32>, !llvm.ptr, vector<7xi1>) -> ()
+ call void @llvm.masked.compressstore.v7f32(<7 x float> %4, ptr %0, <7 x i1> %1)
ret void
}
; CHECK-LABEL: llvm.func @memcpy_test
-define void @memcpy_test(i32 %0, i8* %1, i8* %2) {
+define void @memcpy_test(i32 %0, ptr %1, ptr %2) {
; CHECK: %[[FALSE:.+]] = llvm.mlir.constant(false) : i1
; CHECK: %[[CST:.+]] = llvm.mlir.constant(10 : i64) : i64
- ; CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %[[FALSE]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 %0, i1 false)
- ; CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %[[CST]], %[[FALSE]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i64, i1) -> ()
- call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %1, i8* %2, i64 10, i1 false)
+ ; CHECK: "llvm.intr.memcpy"(%{{.*}}, %{{.*}}, %{{.*}}, %[[FALSE]]) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
+ call void @llvm.memcpy.p0.p0.i32(ptr %1, ptr %2, i32 %0, i1 false)
+ ; CHECK: "llvm.intr.memcpy.inline"(%{{.*}}, %{{.*}}, %[[CST]], %[[FALSE]]) : (!llvm.ptr, !llvm.ptr, i64, i1) -> ()
+ call void @llvm.memcpy.inline.p0.p0.i64(ptr %1, ptr %2, i64 10, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @memmove_test
-define void @memmove_test(i32 %0, i8* %1, i8* %2) {
+define void @memmove_test(i32 %0, ptr %1, ptr %2) {
; CHECK: %[[falseval:.+]] = llvm.mlir.constant(false) : i1
- ; CHECK: "llvm.intr.memmove"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32, i1) -> ()
- call void @llvm.memmove.p0i8.p0i8.i32(i8* %1, i8* %2, i32 %0, i1 false)
+ ; CHECK: "llvm.intr.memmove"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr, !llvm.ptr, i32, i1) -> ()
+ call void @llvm.memmove.p0.p0.i32(ptr %1, ptr %2, i32 %0, i1 false)
ret void
}
; CHECK-LABEL: llvm.func @memset_test
-define void @memset_test(i32 %0, i8* %1, i8 %2) {
+define void @memset_test(i32 %0, ptr %1, i8 %2) {
; CHECK: %[[falseval:.+]] = llvm.mlir.constant(false) : i1
- ; CHECK: "llvm.intr.memset"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr<i8>, i8, i32, i1) -> ()
- call void @llvm.memset.p0i8.i32(i8* %1, i8 %2, i32 %0, i1 false)
+ ; CHECK: "llvm.intr.memset"(%{{.*}}, %{{.*}}, %{{.*}}, %[[falseval]]) : (!llvm.ptr, i8, i32, i1) -> ()
+ call void @llvm.memset.p0.i32(ptr %1, i8 %2, i32 %0, i1 false)
ret void
}
@@ -411,13 +411,13 @@ define void @umul_with_overflow_test(i32 %0, i32 %1, <8 x i32> %2, <8 x i32> %3)
}
; CHECK-LABEL: llvm.func @va_intrinsics_test
-define void @va_intrinsics_test(i8* %0, i8* %1) {
+define void @va_intrinsics_test(ptr %0, ptr %1) {
; CHECK: llvm.intr.vastart %{{.*}}
- call void @llvm.va_start(i8* %0)
+ call void @llvm.va_start(ptr %0)
; CHECK: llvm.intr.vacopy %{{.*}} to %{{.*}}
- call void @llvm.va_copy(i8* %1, i8* %0)
+ call void @llvm.va_copy(ptr %1, ptr %0)
; CHECK: llvm.intr.vaend %{{.*}}
- call void @llvm.va_end(i8* %0)
+ call void @llvm.va_end(ptr %0)
ret void
}
@@ -430,18 +430,18 @@ define void @assume(i1 %true) {
}
; CHECK-LABEL: llvm.func @coro_id
-define void @coro_id(i32 %0, i8* %1) {
+define void @coro_id(i32 %0, ptr %1) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
- %3 = call token @llvm.coro.id(i32 %0, i8* %1, i8* %1, i8* null)
+ %3 = call token @llvm.coro.id(i32 %0, ptr %1, ptr %1, ptr null)
ret void
}
; CHECK-LABEL: llvm.func @coro_begin
-define void @coro_begin(i32 %0, i8* %1) {
+define void @coro_begin(i32 %0, ptr %1) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
- %3 = call token @llvm.coro.id(i32 %0, i8* %1, i8* %1, i8* null)
- ; CHECK: llvm.intr.coro.begin %{{.*}}, %{{.*}} : !llvm.ptr<i8>
- %4 = call i8* @llvm.coro.begin(token %3, i8* %1)
+ %3 = call token @llvm.coro.id(i32 %0, ptr %1, ptr %1, ptr null)
+ ; CHECK: llvm.intr.coro.begin %{{.*}}, %{{.*}} : !llvm.ptr
+ %4 = call ptr @llvm.coro.begin(token %3, ptr %1)
ret void
}
@@ -463,76 +463,76 @@ define void @coro_align() {
}
; CHECK-LABEL: llvm.func @coro_save
-define void @coro_save(i8* %0) {
+define void @coro_save(ptr %0) {
; CHECK: llvm.intr.coro.save %{{.*}} : !llvm.token
- %2 = call token @llvm.coro.save(i8* %0)
+ %2 = call token @llvm.coro.save(ptr %0)
ret void
}
; CHECK-LABEL: llvm.func @coro_suspend
-define void @coro_suspend(i32 %0, i1 %1, i8* %2) {
+define void @coro_suspend(i32 %0, i1 %1, ptr %2) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
- %4 = call token @llvm.coro.id(i32 %0, i8* %2, i8* %2, i8* null)
+ %4 = call token @llvm.coro.id(i32 %0, ptr %2, ptr %2, ptr null)
; CHECK: llvm.intr.coro.suspend %{{.*}}, %{{.*}} : i8
%5 = call i8 @llvm.coro.suspend(token %4, i1 %1)
ret void
}
; CHECK-LABEL: llvm.func @coro_end
-define void @coro_end(i8* %0, i1 %1) {
+define void @coro_end(ptr %0, i1 %1) {
; CHECK: llvm.intr.coro.end
- call i1 @llvm.coro.end(i8* %0, i1 %1)
+ call i1 @llvm.coro.end(ptr %0, i1 %1)
ret void
}
; CHECK-LABEL: llvm.func @coro_free
-define void @coro_free(i32 %0, i8* %1) {
+define void @coro_free(i32 %0, ptr %1) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
- %3 = call token @llvm.coro.id(i32 %0, i8* %1, i8* %1, i8* null)
- ; CHECK: llvm.intr.coro.free %{{.*}}, %{{.*}} : !llvm.ptr<i8>
- %4 = call i8* @llvm.coro.free(token %3, i8* %1)
+ %3 = call token @llvm.coro.id(i32 %0, ptr %1, ptr %1, ptr null)
+ ; CHECK: llvm.intr.coro.free %{{.*}}, %{{.*}} : !llvm.ptr
+ %4 = call ptr @llvm.coro.free(token %3, ptr %1)
ret void
}
; CHECK-LABEL: llvm.func @coro_resume
-define void @coro_resume(i8* %0) {
+define void @coro_resume(ptr %0) {
; CHECK: llvm.intr.coro.resume %{{.*}}
- call void @llvm.coro.resume(i8* %0)
+ call void @llvm.coro.resume(ptr %0)
ret void
}
; CHECK-LABEL: llvm.func @eh_typeid_for
-define void @eh_typeid_for(i8* %0) {
+define void @eh_typeid_for(ptr %0) {
; CHECK: llvm.intr.eh.typeid.for %{{.*}} : i32
- %2 = call i32 @llvm.eh.typeid.for(i8* %0)
+ %2 = call i32 @llvm.eh.typeid.for(ptr %0)
ret void
}
; CHECK-LABEL: llvm.func @stack_save() {
define void @stack_save() {
- ; CHECK: llvm.intr.stacksave : !llvm.ptr<i8>
- %1 = call i8* @llvm.stacksave()
+ ; CHECK: llvm.intr.stacksave : !llvm.ptr
+ %1 = call ptr @llvm.stacksave()
ret void
}
; CHECK-LABEL: llvm.func @stack_restore
-define void @stack_restore(i8* %0) {
+define void @stack_restore(ptr %0) {
; CHECK: llvm.intr.stackrestore %{{.*}}
- call void @llvm.stackrestore(i8* %0)
+ call void @llvm.stackrestore(ptr %0)
ret void
}
; CHECK-LABEL: llvm.func @lifetime
-define void @lifetime(i8* %0) {
- ; CHECK: llvm.intr.lifetime.start 16, %{{.*}} : !llvm.ptr<i8>
- call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
- ; CHECK: llvm.intr.lifetime.end 32, %{{.*}} : !llvm.ptr<i8>
- call void @llvm.lifetime.end.p0i8(i64 32, i8* %0)
+define void @lifetime(ptr %0) {
+ ; CHECK: llvm.intr.lifetime.start 16, %{{.*}} : !llvm.ptr
+ call void @llvm.lifetime.start.p0(i64 16, ptr %0)
+ ; CHECK: llvm.intr.lifetime.end 32, %{{.*}} : !llvm.ptr
+ call void @llvm.lifetime.end.p0(i64 32, ptr %0)
ret void
}
; CHECK-LABEL: llvm.func @vector_predication_intrinsics
-define void @vector_predication_intrinsics(<8 x i32> %0, <8 x i32> %1, <8 x float> %2, <8 x float> %3, <8 x i64> %4, <8 x double> %5, <8 x i32*> %6, i32 %7, float %8, i32* %9, float* %10, <8 x i1> %11, i32 %12) {
+define void @vector_predication_intrinsics(<8 x i32> %0, <8 x i32> %1, <8 x float> %2, <8 x float> %3, <8 x i64> %4, <8 x double> %5, <8 x ptr> %6, i32 %7, float %8, ptr %9, ptr %10, <8 x i1> %11, i32 %12) {
; CHECK: "llvm.intr.vp.add"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32>
%14 = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i1> %11, i32 %12)
; CHECK: "llvm.intr.vp.sub"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, vector<8xi32>, vector<8xi1>, i32) -> vector<8xi32>
@@ -603,14 +603,14 @@ define void @vector_predication_intrinsics(<8 x i32> %0, <8 x i32> %1, <8 x floa
%47 = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %11, <8 x i32> %0, <8 x i32> %1, i32 %12)
; CHECK: "llvm.intr.vp.merge"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi1>, vector<8xi32>, vector<8xi32>, i32) -> vector<8xi32>
%48 = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %11, <8 x i32> %0, <8 x i32> %1, i32 %12)
- ; CHECK: "llvm.intr.vp.store"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, !llvm.ptr<i32>, vector<8xi1>, i32) -> ()
- call void @llvm.vp.store.v8i32.p0i32(<8 x i32> %0, i32* %9, <8 x i1> %11, i32 %12)
- ; CHECK: "llvm.intr.vp.load"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i32>, vector<8xi1>, i32) -> vector<8xi32>
- %49 = call <8 x i32> @llvm.vp.load.v8i32.p0i32(i32* %9, <8 x i1> %11, i32 %12)
- ; CHECK: "llvm.intr.experimental.vp.strided.store"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, !llvm.ptr<i32>, i32, vector<8xi1>, i32) -> ()
- call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32> %0, i32* %9, i32 %7, <8 x i1> %11, i32 %12)
- ; CHECK: "llvm.intr.experimental.vp.strided.load"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i32>, i32, vector<8xi1>, i32) -> vector<8xi32>
- %50 = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %9, i32 %7, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.vp.store"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, !llvm.ptr, vector<8xi1>, i32) -> ()
+ call void @llvm.vp.store.v8i32.p0(<8 x i32> %0, ptr %9, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.vp.load"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, vector<8xi1>, i32) -> vector<8xi32>
+ %49 = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %9, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.experimental.vp.strided.store"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, !llvm.ptr, i32, vector<8xi1>, i32) -> ()
+ call void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32> %0, ptr %9, i32 %7, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.experimental.vp.strided.load"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr, i32, vector<8xi1>, i32) -> vector<8xi32>
+ %50 = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr %9, i32 %7, <8 x i1> %11, i32 %12)
; CHECK: "llvm.intr.vp.trunc"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi64>, vector<8xi1>, i32) -> vector<8xi32>
%51 = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %4, <8 x i1> %11, i32 %12)
; CHECK: "llvm.intr.vp.zext"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi32>, vector<8xi1>, i32) -> vector<8xi64>
@@ -625,10 +625,10 @@ define void @vector_predication_intrinsics(<8 x i32> %0, <8 x i32> %1, <8 x floa
%56 = call <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double> %5, <8 x i1> %11, i32 %12)
; CHECK: "llvm.intr.vp.fptosi"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xf64>, vector<8xi1>, i32) -> vector<8xi64>
%57 = call <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double> %5, <8 x i1> %11, i32 %12)
- ; CHECK: "llvm.intr.vp.ptrtoint"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.vec<8 x ptr<i32>>, vector<8xi1>, i32) -> vector<8xi64>
- %58 = call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32(<8 x i32*> %6, <8 x i1> %11, i32 %12)
- ; CHECK: "llvm.intr.vp.inttoptr"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x ptr<i32>>
- %59 = call <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64(<8 x i64> %4, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.vp.ptrtoint"(%{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.vec<8 x ptr>, vector<8xi1>, i32) -> vector<8xi64>
+ %58 = call <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr> %6, <8 x i1> %11, i32 %12)
+ ; CHECK: "llvm.intr.vp.inttoptr"(%{{.*}}, %{{.*}}, %{{.*}}) : (vector<8xi64>, vector<8xi1>, i32) -> !llvm.vec<8 x ptr>
+ %59 = call <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64> %4, <8 x i1> %11, i32 %12)
ret void
}
@@ -636,7 +636,7 @@ declare float @llvm.fmuladd.f32(float, float, float)
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare float @llvm.fma.f32(float, float, float)
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
-declare void @llvm.prefetch.p0i8(i8* nocapture readonly, i32 immarg, i32 immarg, i32)
+declare void @llvm.prefetch.p0(ptr nocapture readonly, i32 immarg, i32 immarg, i32)
declare float @llvm.exp.f32(float)
declare <8 x float> @llvm.exp.v8f32(<8 x float>)
declare float @llvm.exp2.f32(float)
@@ -700,19 +700,19 @@ declare float @llvm.vector.reduce.fmul.v8f32(float, <8 x float>)
declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
declare <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float>, <48 x float>, i32 immarg, i32 immarg, i32 immarg)
declare <48 x float> @llvm.matrix.transpose.v48f32(<48 x float>, i32 immarg, i32 immarg)
-declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(float* nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
-declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, float* nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
+declare <48 x float> @llvm.matrix.column.major.load.v48f32.i64(ptr nocapture, i64, i1 immarg, i32 immarg, i32 immarg)
+declare void @llvm.matrix.column.major.store.v48f32.i64(<48 x float>, ptr nocapture writeonly, i64, i1 immarg, i32 immarg, i32 immarg)
declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i64(i64, i64)
-declare <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>*, i32 immarg, <7 x i1>, <7 x float>)
-declare void @llvm.masked.store.v7f32.p0v7f32(<7 x float>, <7 x float>*, i32 immarg, <7 x i1>)
-declare <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*>, i32 immarg, <7 x i1>, <7 x float>)
-declare void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float>, <7 x float*>, i32 immarg, <7 x i1>)
-declare <7 x float> @llvm.masked.expandload.v7f32(float*, <7 x i1>, <7 x float>)
-declare void @llvm.masked.compressstore.v7f32(<7 x float>, float*, <7 x i1>)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64 immarg, i1 immarg)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1 immarg)
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+declare <7 x float> @llvm.masked.load.v7f32.p0(ptr, i32 immarg, <7 x i1>, <7 x float>)
+declare void @llvm.masked.store.v7f32.p0(<7 x float>, ptr, i32 immarg, <7 x i1>)
+declare <7 x float> @llvm.masked.gather.v7f32.v7p0(<7 x ptr>, i32 immarg, <7 x i1>, <7 x float>)
+declare void @llvm.masked.scatter.v7f32.v7p0(<7 x float>, <7 x ptr>, i32 immarg, <7 x i1>)
+declare <7 x float> @llvm.masked.expandload.v7f32(ptr, <7 x i1>, <7 x float>)
+declare void @llvm.masked.compressstore.v7f32(<7 x float>, ptr, <7 x i1>)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64 immarg, i1 immarg)
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32>, <8 x i32>)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
@@ -725,23 +725,23 @@ declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
declare { <8 x i32>, <8 x i1> } @llvm.smul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
declare { <8 x i32>, <8 x i1> } @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
-declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
-declare i8* @llvm.coro.begin(token, i8* writeonly)
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
+declare ptr @llvm.coro.begin(token, ptr writeonly)
declare i64 @llvm.coro.size.i64()
declare i32 @llvm.coro.size.i32()
declare i64 @llvm.coro.align.i64()
declare i32 @llvm.coro.align.i32()
-declare token @llvm.coro.save(i8*)
+declare token @llvm.coro.save(ptr)
declare i8 @llvm.coro.suspend(token, i1)
-declare i1 @llvm.coro.end(i8*, i1)
-declare i8* @llvm.coro.free(token, i8* nocapture readonly)
-declare void @llvm.coro.resume(i8*)
-declare i32 @llvm.eh.typeid.for(i8*)
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare i1 @llvm.coro.end(ptr, i1)
+declare ptr @llvm.coro.free(token, ptr nocapture readonly)
+declare void @llvm.coro.resume(ptr)
+declare i32 @llvm.eh.typeid.for(ptr)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)
declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
@@ -777,10 +777,10 @@ declare float @llvm.vp.reduce.fmax.v8f32(float, <8 x float>, <8 x i1>, i32)
declare float @llvm.vp.reduce.fmin.v8f32(float, <8 x float>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
-declare void @llvm.vp.store.v8i32.p0i32(<8 x i32>, i32* nocapture, <8 x i1>, i32)
-declare <8 x i32> @llvm.vp.load.v8i32.p0i32(i32* nocapture, <8 x i1>, i32)
-declare void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32>, i32* nocapture, i32, <8 x i1>, i32)
-declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* nocapture, i32, <8 x i1>, i32)
+declare void @llvm.vp.store.v8i32.p0(<8 x i32>, ptr nocapture, <8 x i1>, i32)
+declare <8 x i32> @llvm.vp.load.v8i32.p0(ptr nocapture, <8 x i1>, i32)
+declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr nocapture, i32, <8 x i1>, i32)
+declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr nocapture, i32, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.zext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.sext.v8i64.v8i32(<8 x i32>, <8 x i1>, i32)
@@ -788,8 +788,8 @@ declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.fptoui.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
declare <8 x i64> @llvm.vp.fptosi.v8i64.v8f64(<8 x double>, <8 x i1>, i32)
-declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0i32(<8 x i32*>, <8 x i1>, i32)
-declare <8 x i32*> @llvm.vp.inttoptr.v8p0i32.v8i64(<8 x i64>, <8 x i1>, i32)
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare <8 x i64> @llvm.vp.ptrtoint.v8i64.v8p0(<8 x ptr>, <8 x i1>, i32)
+declare <8 x ptr> @llvm.vp.inttoptr.v8p0.v8i64(<8 x i64>, <8 x i1>, i32)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
declare void @llvm.assume(i1)
diff --git a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
index 27a940cd4e852..41933dcbe7cb1 100644
--- a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
+++ b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
@@ -1,13 +1,12 @@
-; RUN: mlir-translate -opaque-pointers=0 --import-llvm %s | FileCheck %s
+; RUN: mlir-translate --import-llvm %s | FileCheck %s
-%Domain = type { %Domain**, %Domain* }
+%Domain = type { ptr, ptr }
; CHECK: llvm.mlir.global external @D()
-; CHECK-SAME: !llvm.struct<"Domain", (ptr<ptr<struct<"Domain">>>, ptr<struct<"Domain">>)>
-; CHECK: %[[E0:.+]] = llvm.mlir.null : !llvm.ptr<ptr<struct<"Domain", (ptr<ptr<struct<"Domain">>>, ptr<struct<"Domain">>)>>>
-; CHECK: %[[E1:.+]] = llvm.mlir.null : !llvm.ptr<struct<"Domain", (ptr<ptr<struct<"Domain">>>, ptr<struct<"Domain">>)>>
-; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"Domain", (ptr<ptr<struct<"Domain">>>, ptr<struct<"Domain">>)>
+; CHECK-SAME: !llvm.struct<"Domain", (ptr, ptr)>
+; CHECK: %[[E0:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"Domain", (ptr, ptr)>
; CHECK: %[[CHAIN:.+]] = llvm.insertvalue %[[E0]], %[[ROOT]][0]
-; CHECK: %[[RES:.+]] = llvm.insertvalue %[[E1]], %[[CHAIN]][1]
+; CHECK: %[[RES:.+]] = llvm.insertvalue %[[E0]], %[[CHAIN]][1]
; CHECK: llvm.return %[[RES]]
@D = global %Domain zeroinitializer