[Mlir-commits] [mlir] [MLIR][VectorToLLVM] Remove typed pointer support (PR #71075)
llvmlistbot at llvm.org
Thu Nov 2 09:14:43 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-mlir
Author: Christian Ulmann (Dinistro)
This commit removes support for lowering the Vector dialect to the LLVM dialect with typed pointers. Typed pointers have been deprecated for a while and are planned to be removed from the LLVM dialect soon.
Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
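
For context, a minimal sketch (not part of this patch) of what the deprecation is about: with typed pointers, the pointee type is baked into the pointer type, so reinterpreting memory requires explicit bitcasts; with opaque pointers there is a single `!llvm.ptr` type and the pointee type is spelled on the operations instead. The SSA names `%ptr` and `%ptr_opaque` below are placeholders:

```mlir
// Typed pointers (deprecated): the pointee type is part of the pointer
// type, so viewing an f32 buffer as a vector needs an llvm.bitcast.
%vec_ptr = llvm.bitcast %ptr : !llvm.ptr<f32> to !llvm.ptr<vector<8xf32>>
%v0 = llvm.load %vec_ptr : !llvm.ptr<vector<8xf32>>

// Opaque pointers: only !llvm.ptr exists; the loaded type is carried by
// the llvm.load op itself, and the bitcast disappears.
%v1 = llvm.load %ptr_opaque : !llvm.ptr -> vector<8xf32>
```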
---
Full diff: https://github.com/llvm/llvm-project/pull/71075.diff
8 Files Affected:
- (modified) mlir/include/mlir/Conversion/Passes.td (+1-4)
- (modified) mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (+2-24)
- (modified) mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp (-1)
- (removed) mlir/test/Conversion/VectorToLLVM/typed-pointers.mlir (-172)
- (modified) mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir (+2-2)
- (modified) mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir (+2-2)
- (modified) mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir (+1-1)
- (modified) mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir (+1-2)
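
The user-visible effect is that the `use-opaque-pointers` pass option no longer exists, so pipelines that still pass it will fail to parse. A minimal before/after sketch of an invocation, written as test-style RUN lines with the usual `%s` file substitution:

```mlir
// Before this patch (option accepted; opaque pointers were already the default):
// RUN: mlir-opt %s -convert-vector-to-llvm='use-opaque-pointers=1'
// After this patch (the option is removed and would now be rejected):
// RUN: mlir-opt %s -convert-vector-to-llvm
```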
``````````diff
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 00e342f8f30eb3e..c2f90b8984b97fb 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -1270,10 +1270,7 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> {
Option<"x86Vector", "enable-x86vector",
"bool", /*default=*/"false",
"Enables the use of X86Vector dialect while lowering the vector "
- "dialect.">,
- Option<"useOpaquePointers", "use-opaque-pointers", "bool",
- /*default=*/"true", "Generate LLVM IR using opaque pointers "
- "instead of typed pointers">
+ "dialect.">
];
}
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 4af58653c8227ae..75a35b4c801e4a5 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -112,19 +112,6 @@ static Value getIndexedPtrs(ConversionPatternRewriter &rewriter, Location loc,
base, index);
}
-// Casts a strided element pointer to a vector pointer. The vector pointer
-// will be in the same address space as the incoming memref type.
-static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
- Value ptr, MemRefType memRefType, Type vt,
- const LLVMTypeConverter &converter) {
- if (converter.useOpaquePointers())
- return ptr;
-
- unsigned addressSpace = *converter.getMemRefAddressSpace(memRefType);
- auto pType = LLVM::LLVMPointerType::get(vt, addressSpace);
- return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
-}
-
/// Convert `foldResult` into a Value. Integer attribute is converted to
/// an LLVM constant op.
static Value getAsLLVMValue(OpBuilder &builder, Location loc,
@@ -261,10 +248,8 @@ class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
this->typeConverter->convertType(loadOrStoreOp.getVectorType()));
Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.getBase(),
adaptor.getIndices(), rewriter);
- Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype,
- *this->getTypeConverter());
-
- replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
+ replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, dataPtr, align,
+ rewriter);
return success();
}
};
@@ -1440,19 +1425,12 @@ class VectorTypeCastOpConversion
// Create descriptor.
auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
- Type llvmTargetElementTy = desc.getElementPtrType();
// Set allocated ptr.
Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
- if (!getTypeConverter()->useOpaquePointers())
- allocated =
- rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
desc.setAllocatedPtr(rewriter, loc, allocated);
// Set aligned ptr.
Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
- if (!getTypeConverter()->useOpaquePointers())
- ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
-
desc.setAlignedPtr(rewriter, loc, ptr);
// Fill offset 0.
auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
index b865a2671fff762..4c6d0672d4108ef 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -82,7 +82,6 @@ void LowerVectorToLLVMPass::runOnOperation() {
// Convert to the LLVM IR dialect.
LowerToLLVMOptions options(&getContext());
- options.useOpaquePointers = useOpaquePointers;
LLVMTypeConverter converter(&getContext(), options);
RewritePatternSet patterns(&getContext());
populateVectorMaskMaterializationPatterns(patterns, force32BitVectorIndices);
diff --git a/mlir/test/Conversion/VectorToLLVM/typed-pointers.mlir b/mlir/test/Conversion/VectorToLLVM/typed-pointers.mlir
deleted file mode 100644
index 5dbd9a589f39277..000000000000000
--- a/mlir/test/Conversion/VectorToLLVM/typed-pointers.mlir
+++ /dev/null
@@ -1,172 +0,0 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm='use-opaque-pointers=0' -split-input-file | FileCheck %s
-
-func.func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
- %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
- return %0 : memref<vector<8x8x8xf32>>
-}
-// CHECK-LABEL: @vector_type_cast
-// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>>, ptr<array<8 x array<8 x vector<8xf32>>>>, i64)>
-// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<f32> to !llvm.ptr<array<8 x array<8 x vector<8xf32>>>>
-// CHECK: llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>>, ptr<array<8 x array<8 x vector<8xf32>>>>, i64)>
-// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<f32> to !llvm.ptr<array<8 x array<8 x vector<8xf32>>>>
-// CHECK: llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>>, ptr<array<8 x array<8 x vector<8xf32>>>>, i64)>
-// CHECK: llvm.mlir.constant(0 : index
-// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>>, ptr<array<8 x array<8 x vector<8xf32>>>>, i64)>
-
-// -----
-
-func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
- %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
- return %0 : memref<vector<8x8x8xf32>, 3>
-}
-// CHECK-LABEL: @vector_type_cast_non_zero_addrspace
-// CHECK: llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>, 3>, ptr<array<8 x array<8 x vector<8xf32>>>, 3>, i64)>
-// CHECK: %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<f32, 3> to !llvm.ptr<array<8 x array<8 x vector<8xf32>>>, 3>
-// CHECK: llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>, 3>, ptr<array<8 x array<8 x vector<8xf32>>>, 3>, i64)>
-// CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK: %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<f32, 3> to !llvm.ptr<array<8 x array<8 x vector<8xf32>>>, 3>
-// CHECK: llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>, 3>, ptr<array<8 x array<8 x vector<8xf32>>>, 3>, i64)>
-// CHECK: llvm.mlir.constant(0 : index
-// CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vector<8xf32>>>, 3>, ptr<array<8 x array<8 x vector<8xf32>>>, 3>, i64)>
-
-// -----
-
-func.func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
- %f7 = arith.constant 7.0: f32
- %f = vector.transfer_read %A[%base], %f7
- {permutation_map = affine_map<(d0) -> (d0)>} :
- memref<?xf32>, vector<17xf32>
- vector.transfer_write %f, %A[%base]
- {permutation_map = affine_map<(d0) -> (d0)>} :
- vector<17xf32>, memref<?xf32>
- return %f: vector<17xf32>
-}
-// CHECK-LABEL: func @transfer_read_1d
-// CHECK-SAME: %[[MEM:.*]]: memref<?xf32>,
-// CHECK-SAME: %[[BASE:.*]]: index) -> vector<17xf32>
-// CHECK: %[[C7:.*]] = arith.constant 7.0
-//
-// 1. Let dim be the memref dimension, compute the in-bound index (dim - offset)
-// CHECK: %[[C0:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM:.*]] = memref.dim %[[MEM]], %[[C0]] : memref<?xf32>
-// CHECK: %[[BOUND:.*]] = arith.subi %[[DIM]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex:.*]] = arith.constant dense
-// CHECK-SAME: <[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> :
-// CHECK-SAME: vector<17xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-// [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc:.*]] = arith.index_cast %[[BOUND]] : index to i32
-// CHECK: %[[boundVecInsert:.*]] = llvm.insertelement %[[btrunc]]
-// CHECK: %[[boundVect:.*]] = llvm.shufflevector %[[boundVecInsert]]
-// CHECK: %[[mask:.*]] = arith.cmpi slt, %[[linearIndex]], %[[boundVect]]
-// CHECK-SAME: : vector<17xi32>
-//
-// 4. Create pass-through vector.
-// CHECK: %[[PASS_THROUGH:.*]] = arith.constant dense<7.{{.*}}> : vector<17xf32>
-//
-// 5. Bitcast to vector form.
-// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}} :
-// CHECK-SAME: (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
-// CHECK-SAME: !llvm.ptr<f32> to !llvm.ptr<vector<17xf32>>
-//
-// 6. Rewrite as a masked read.
-// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %[[vecPtr]], %[[mask]],
-// CHECK-SAME: %[[PASS_THROUGH]] {alignment = 4 : i32} :
-//
-// 1. Let dim be the memref dimension, compute the in-bound index (dim - offset)
-// CHECK: %[[C0_b:.*]] = arith.constant 0 : index
-// CHECK: %[[DIM_b:.*]] = memref.dim %[[MEM]], %[[C0_b]] : memref<?xf32>
-// CHECK: %[[BOUND_b:.*]] = arith.subi %[[DIM_b]], %[[BASE]] : index
-//
-// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
-// CHECK: %[[linearIndex_b:.*]] = arith.constant dense
-// CHECK-SAME: <[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> :
-// CHECK-SAME: vector<17xi32>
-//
-// 3. Create bound vector to compute in-bound mask:
-// [ 0 .. vector_length - 1 ] < [ dim - offset .. dim - offset ]
-// CHECK: %[[btrunc_b:.*]] = arith.index_cast %[[BOUND_b]] : index to i32
-// CHECK: %[[boundVecInsert_b:.*]] = llvm.insertelement %[[btrunc_b]]
-// CHECK: %[[boundVect_b:.*]] = llvm.shufflevector %[[boundVecInsert_b]]
-// CHECK: %[[mask_b:.*]] = arith.cmpi slt, %[[linearIndex_b]],
-// CHECK-SAME: %[[boundVect_b]] : vector<17xi32>
-//
-// 4. Bitcast to vector form.
-// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-// CHECK-SAME: (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[vecPtr_b:.*]] = llvm.bitcast %[[gep_b]] :
-// CHECK-SAME: !llvm.ptr<f32> to !llvm.ptr<vector<17xf32>>
-//
-// 5. Rewrite as a masked write.
-// CHECK: llvm.intr.masked.store %[[loaded]], %[[vecPtr_b]], %[[mask_b]]
-// CHECK-SAME: {alignment = 4 : i32} :
-// CHECK-SAME: vector<17xf32>, vector<17xi1> into !llvm.ptr<vector<17xf32>>
-
-// -----
-
-func.func @vector_load_op(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
- %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32>
- return %0 : vector<8xf32>
-}
-
-// CHECK-LABEL: func @vector_load_op
-// CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
-// CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]] : i64
-// CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}} : i64
-// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}}[%[[add]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[bcast:.*]] = llvm.bitcast %[[gep]] : !llvm.ptr<f32> to !llvm.ptr<vector<8xf32>>
-// CHECK: llvm.load %[[bcast]] {alignment = 4 : i64} : !llvm.ptr<vector<8xf32>>
-
-// -----
-
-func.func @vector_store_op(%memref : memref<200x100xf32>, %i : index, %j : index) {
- %val = arith.constant dense<11.0> : vector<4xf32>
- vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<4xf32>
- return
-}
-
-// CHECK-LABEL: func @vector_store_op
-// CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
-// CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]] : i64
-// CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}} : i64
-// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}}[%[[add]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[bcast:.*]] = llvm.bitcast %[[gep]] : !llvm.ptr<f32> to !llvm.ptr<vector<4xf32>>
-// CHECK: llvm.store %{{.*}}, %[[bcast]] {alignment = 4 : i64} : !llvm.ptr<vector<4xf32>>
-
-// -----
-
-func.func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
- %c0 = arith.constant 0: index
- %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
- return %0 : vector<16xf32>
-}
-
-// CHECK-LABEL: func @masked_load_op
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[B:.*]] = llvm.bitcast %[[P]] : !llvm.ptr<f32> to !llvm.ptr<vector<16xf32>>
-// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[B]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr<vector<16xf32>>, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
-// CHECK: return %[[L]] : vector<16xf32>
-
-// -----
-
-func.func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
- %c0 = arith.constant 0: index
- vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32>
- return
-}
-
-// CHECK-LABEL: func @masked_store_op
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
-// CHECK: %[[B:.*]] = llvm.bitcast %[[P]] : !llvm.ptr<f32> to !llvm.ptr<vector<16xf32>>
-// CHECK: llvm.intr.masked.store %{{.*}}, %[[B]], %{{.*}} {alignment = 4 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr<vector<16xf32>>
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
index 11354213d9ab497..1abadcc345cd2d8 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s --convert-vector-to-llvm='force-32bit-vector-indices=1 use-opaque-pointers=1' | FileCheck %s --check-prefix=CMP32
-// RUN: mlir-opt %s --convert-vector-to-llvm='force-32bit-vector-indices=0 use-opaque-pointers=1' | FileCheck %s --check-prefix=CMP64
+// RUN: mlir-opt %s --convert-vector-to-llvm='force-32bit-vector-indices=1' | FileCheck %s --check-prefix=CMP32
+// RUN: mlir-opt %s --convert-vector-to-llvm='force-32bit-vector-indices=0' | FileCheck %s --check-prefix=CMP64
// CMP32-LABEL: @genbool_var_1d(
// CMP32-SAME: %[[ARG:.*]]: index)
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
index 13b7faed4790d7f..22463f57f24cfd2 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm='use-opaque-pointers=1' -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -convert-vector-to-llvm='reassociate-fp-reductions use-opaque-pointers=1' -split-input-file | FileCheck %s --check-prefix=REASSOC
+// RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -convert-vector-to-llvm='reassociate-fp-reductions' -split-input-file | FileCheck %s --check-prefix=REASSOC
// CHECK-LABEL: @reduce_add_f32(
// CHECK-SAME: %[[A:.*]]: vector<16xf32>)
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir b/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
index 4a5a2032816626f..811b10721bf2849 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm='use-opaque-pointers=1' | mlir-opt | FileCheck %s
+// RUN: mlir-opt %s -convert-vector-to-llvm | mlir-opt | FileCheck %s
// CHECK: vector_scalable_memcopy([[SRC:%arg[0-9]+]]: memref<?xf32>, [[DST:%arg[0-9]+]]
func.func @vector_scalable_memcopy(%src : memref<?xf32>, %dst : memref<?xf32>, %size : index) {
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 05733214bc3ae80..012d30d96799f20 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1,5 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm='use-opaque-pointers=1' -split-input-file | FileCheck %s
-
+// RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
func.func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
%0 = vector.bitcast %input : vector<f32> to vector<i32>
``````````
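
For a concrete picture of what the deleted typed-pointer tests were checking, here is a rough before/after sketch of the `vector.load` lowering, hand-assembled from the removed CHECK lines rather than copied from compiler output; `%base`, `%base2`, and `%idx` are placeholder SSA names:

```mlir
// Typed-pointer lowering (removed): the GEP yields !llvm.ptr<f32>, which
// must be bitcast to !llvm.ptr<vector<8xf32>> before the load.
%gep  = llvm.getelementptr %base[%idx] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
%cast = llvm.bitcast %gep : !llvm.ptr<f32> to !llvm.ptr<vector<8xf32>>
%old  = llvm.load %cast {alignment = 4 : i64} : !llvm.ptr<vector<8xf32>>

// Opaque-pointer lowering (the only mode after this patch): no bitcast;
// the element type is an attribute of the GEP and the result type is
// spelled on the load.
%gep2 = llvm.getelementptr %base2[%idx] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%new  = llvm.load %gep2 {alignment = 4 : i64} : !llvm.ptr -> vector<8xf32>
```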
https://github.com/llvm/llvm-project/pull/71075