[Mlir-commits] [mlir] [mlir][vector][nfc] Update vector-to-llvm.mlir (PR #118112)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Nov 29 08:45:35 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-mlir

Author: Andrzej WarzyƄski (banach-space)

<details>
<summary>Changes</summary>

* Adds extra comments to group Ops
* Unifies the test function naming, i.e.
  * `@vector_{op_name}_{variant}` -> `@{op_name}_{variant}`
* Unifies input variable names (`%input` -> `%arg0`)

There are still some inconsistencies within this file - I'm happy to send
more updates if folks find that useful, but I'd recommend splitting them
across multiple PRs (otherwise it's hard to review).


---

Patch is 41.37 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/118112.diff


1 File Affected:

- (modified) mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir (+244-92) 


``````````diff
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 1c42538cf85912..fe69b1a076f9f8 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1,79 +1,81 @@
 // RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
 
-// TODO: Add tests for for vector.type_cast that would cover scalable vectors
+//===----------------------------------------------------------------------===//
+// vector.bitcast
+//===----------------------------------------------------------------------===//
 
-func.func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
-  %0 = vector.bitcast %input : vector<f32> to vector<i32>
+func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector<f32>) -> vector<i32> {
+  %0 = vector.bitcast %arg0 : vector<f32> to vector<i32>
   return %0 : vector<i32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector_0d
-// CHECK-SAME:  %[[input:.*]]: vector<f32>
-// CHECK:       %[[vec_f32_1d:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<f32> to vector<1xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<f32>
+// CHECK:       %[[vec_f32_1d:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<f32> to vector<1xf32>
 // CHECK:       %[[vec_i32_1d:.*]] = llvm.bitcast %[[vec_f32_1d]] : vector<1xf32> to vector<1xi32>
 // CHECK:       %[[vec_i32_0d:.*]] = builtin.unrealized_conversion_cast %[[vec_i32_1d]] : vector<1xi32> to vector<i32>
 // CHECK:       return %[[vec_i32_0d]] : vector<i32>
 
 // -----
 
-func.func @bitcast_f32_to_i32_vector(%input: vector<16xf32>) -> vector<16xi32> {
-  %0 = vector.bitcast %input : vector<16xf32> to vector<16xi32>
+func.func @bitcast_f32_to_i32_vector(%arg0: vector<16xf32>) -> vector<16xi32> {
+  %0 = vector.bitcast %arg0 : vector<16xf32> to vector<16xi32>
   return %0 : vector<16xi32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector
-// CHECK-SAME:  %[[input:.*]]: vector<16xf32>
-// CHECK:       llvm.bitcast %[[input]] : vector<16xf32> to vector<16xi32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<16xf32>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<16xf32> to vector<16xi32>
 
-func.func @bitcast_f32_to_i32_vector_scalable(%input: vector<[16]xf32>) -> vector<[16]xi32> {
-  %0 = vector.bitcast %input : vector<[16]xf32> to vector<[16]xi32>
+func.func @bitcast_f32_to_i32_vector_scalable(%arg0: vector<[16]xf32>) -> vector<[16]xi32> {
+  %0 = vector.bitcast %arg0 : vector<[16]xf32> to vector<[16]xi32>
   return %0 : vector<[16]xi32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[16]xf32>
-// CHECK:       llvm.bitcast %[[input]] : vector<[16]xf32> to vector<[16]xi32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[16]xf32>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<[16]xf32> to vector<[16]xi32>
 
 // -----
 
-func.func @bitcast_i8_to_f32_vector(%input: vector<64xi8>) -> vector<16xf32> {
-  %0 = vector.bitcast %input : vector<64xi8> to vector<16xf32>
+func.func @bitcast_i8_to_f32_vector(%arg0: vector<64xi8>) -> vector<16xf32> {
+  %0 = vector.bitcast %arg0 : vector<64xi8> to vector<16xf32>
   return %0 : vector<16xf32>
 }
 
 // CHECK-LABEL: @bitcast_i8_to_f32_vector
-// CHECK-SAME:  %[[input:.*]]: vector<64xi8>
-// CHECK:       llvm.bitcast %[[input]] : vector<64xi8> to vector<16xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<64xi8>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<64xi8> to vector<16xf32>
 
-func.func @bitcast_i8_to_f32_vector_scalable(%input: vector<[64]xi8>) -> vector<[16]xf32> {
-  %0 = vector.bitcast %input : vector<[64]xi8> to vector<[16]xf32>
+func.func @bitcast_i8_to_f32_vector_scalable(%arg0: vector<[64]xi8>) -> vector<[16]xf32> {
+  %0 = vector.bitcast %arg0 : vector<[64]xi8> to vector<[16]xf32>
   return %0 : vector<[16]xf32>
 }
 
 // CHECK-LABEL: @bitcast_i8_to_f32_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[64]xi8>
-// CHECK:       llvm.bitcast %[[input]] : vector<[64]xi8> to vector<[16]xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[64]xi8>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<[64]xi8> to vector<[16]xf32>
 
 // -----
 
-func.func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
-  %0 = vector.bitcast %input : vector<16xindex> to vector<128xi8>
+func.func @bitcast_index_to_i8_vector(%arg0: vector<16xindex>) -> vector<128xi8> {
+  %0 = vector.bitcast %arg0 : vector<16xindex> to vector<128xi8>
   return %0 : vector<128xi8>
 }
 
 // CHECK-LABEL: @bitcast_index_to_i8_vector
-// CHECK-SAME:  %[[input:.*]]: vector<16xindex>
-// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<16xindex> to vector<16xi64>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<16xindex>
+// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<16xindex> to vector<16xi64>
 // CHECK:       llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8>
 
-func.func @bitcast_index_to_i8_vector_scalable(%input: vector<[16]xindex>) -> vector<[128]xi8> {
-  %0 = vector.bitcast %input : vector<[16]xindex> to vector<[128]xi8>
+func.func @bitcast_index_to_i8_vector_scalable(%arg0: vector<[16]xindex>) -> vector<[128]xi8> {
+  %0 = vector.bitcast %arg0 : vector<[16]xindex> to vector<[128]xi8>
   return %0 : vector<[128]xi8>
 }
 
 // CHECK-LABEL: @bitcast_index_to_i8_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[16]xindex>
-// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<[16]xindex> to vector<[16]xi64>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[16]xindex>
+// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<[16]xindex> to vector<[16]xi64>
 // CHECK:       llvm.bitcast %[[T0]] : vector<[16]xi64> to vector<[128]xi8>
 
 // -----
@@ -110,6 +112,10 @@ func.func @bitcast_2d_scalable(%arg0: vector<2x[4]xi32>) -> vector<2x[2]xi64> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.broadcast
+//===----------------------------------------------------------------------===//
+
 func.func @broadcast_vec0d_from_f32(%arg0: f32) -> vector<f32> {
   %0 = vector.broadcast %arg0 : f32 to vector<f32>
   return %0 : vector<f32>
@@ -610,6 +616,10 @@ func.func @broadcast_stretch_in_middle_scalable_v2(%arg0: vector<[4]x1x2xf32>) -
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.outerproduct
+//===----------------------------------------------------------------------===//
+
 func.func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
@@ -758,6 +768,10 @@ func.func @outerproduct_add_scalable(%arg0: vector<2xf32>, %arg1: vector<[3]xf32
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.mask { vector.outerproduct }
+//===----------------------------------------------------------------------===//
+
 func.func @masked_float_add_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: vector<2xf32>, %m: vector<2xi1>) -> vector<2xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<add>} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32>
   return %0 : vector<2xf32>
@@ -996,6 +1010,10 @@ func.func @masked_int_or_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32,
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.shuffle
+//===----------------------------------------------------------------------===//
+
 func.func @shuffle_0D_direct(%arg0: vector<f32>) -> vector<3xf32> {
   %1 = vector.shuffle %arg0, %arg0 [0, 1, 0] : vector<f32>, vector<f32>
   return %1 : vector<3xf32>
@@ -1083,6 +1101,10 @@ func.func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf3
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extractelement
+//===----------------------------------------------------------------------===//
+
 func.func @extractelement_from_vec_0d_f32(%arg0: vector<f32>) -> f32 {
   %1 = vector.extractelement %arg0[] : vector<f32>
   return %1 : f32
@@ -1142,6 +1164,10 @@ func.func @extractelement_from_vec_1d_f32_idx_as_index_scalable(%arg0: vector<[1
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extract
+//===----------------------------------------------------------------------===//
+
 func.func @extract_scalar_from_vec_1d_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.extract %arg0[15]: f32 from vector<16xf32>
   return %0 : f32
@@ -1312,6 +1338,10 @@ func.func @extract_scalar_from_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insertelement
+//===----------------------------------------------------------------------===//
+
 func.func @insertelement_into_vec_0d_f32(%arg0: f32, %arg1: vector<f32>) -> vector<f32> {
   %1 = vector.insertelement %arg0, %arg1[] : vector<f32>
   return %1 : vector<f32>
@@ -1379,6 +1409,10 @@ func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable(%arg0: f
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insert
+//===----------------------------------------------------------------------===//
+
 func.func @insert_scalar_into_vec_1d_f32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32>
   return %0 : vector<4xf32>
@@ -1538,6 +1572,12 @@ func.func @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[1
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.type_cast
+//
+// TODO: Add tests for for vector.type_cast that would cover scalable vectors
+//===----------------------------------------------------------------------===//
+
 func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
   return %0 : memref<vector<8x8x8xf32>>
@@ -1569,11 +1609,11 @@ func.func @type_cast_index(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xin
 
 // -----
 
-func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
+func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
   return %0 : memref<vector<8x8x8xf32>, 3>
 }
-// CHECK-LABEL: @vector_type_cast_non_zero_addrspace
+// CHECK-LABEL: @type_cast_non_zero_addrspace
 //       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64)>
 //       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:   llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64)>
@@ -1586,6 +1626,10 @@ func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> m
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.print
+//===----------------------------------------------------------------------===//
+
 func.func @print_scalar_i1(%arg0: i1) {
   vector.print %arg0 : i1
   return
@@ -1772,6 +1816,10 @@ func.func @print_string() {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extract_strided_slice
+//===----------------------------------------------------------------------===//
+
 func.func @extract_strided_slice_f32_1d_from_1d(%arg0: vector<4xf32>) -> vector<2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>
   return %0 : vector<2xf32>
@@ -1872,6 +1920,10 @@ func.func @extract_strided_slice_f32_2d_from_2d_scalable(%arg0: vector<4x[8]xf32
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insert_strided_slice
+//===----------------------------------------------------------------------===//
+
 func.func @insert_strided_slice_f32_2d_into_3d(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vector<4x4x4xf32> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xf32> into vector<4x4x4xf32>
   return %0 : vector<4x4x4xf32>
@@ -1998,8 +2050,12 @@ func.func @insert_strided_slice_f32_2d_into_3d_scalable(%arg0: vector<2x[4]xf32>
 
 // -----
 
-func.func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>, %d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
-  // CHECK-LABEL: @vector_fma
+//===----------------------------------------------------------------------===//
+// vector.fma
+//===----------------------------------------------------------------------===//
+
+func.func @fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>, %d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
+  // CHECK-LABEL: @fma
   //  CHECK-SAME: %[[A:.*]]: vector<8xf32>
   //  CHECK-SAME: %[[B:.*]]: vector<2x4xf32>
   //  CHECK-SAME: %[[C:.*]]: vector<1x1x1xf32>
@@ -2033,8 +2089,8 @@ func.func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf
   return %0, %1, %2, %3: vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>
 }
 
-func.func @vector_fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: vector<1x1x[1]xf32>, %d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
-  // CHECK-LABEL: @vector_fma_scalable
+func.func @fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: vector<1x1x[1]xf32>, %d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
+  // CHECK-LABEL: @fma_scalable
   //  CHECK-SAME: %[[A:.*]]: vector<[8]xf32>
   //  CHECK-SAME: %[[B:.*]]: vector<2x[4]xf32>
   //  CHECK-SAME: %[[C:.*]]: vector<1x1x[1]xf32>
@@ -2066,6 +2122,10 @@ func.func @vector_fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: v
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.reduction
+//===----------------------------------------------------------------------===//
+
 func.func @reduce_0d_f32(%arg0: vector<f32>) -> f32 {
   %0 = vector.reduction <add>, %arg0 : vector<f32> into f32
   return %0 : f32
@@ -2691,6 +2751,10 @@ func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index {
 //                          4x16                16x3               4x3
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.matrix_multiply
+//===----------------------------------------------------------------------===//
+
 func.func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
   %C = vector.matrix_multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
@@ -2717,6 +2781,10 @@ func.func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vecto
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.constant_mask
+//===----------------------------------------------------------------------===//
+
 func.func @constant_mask_0d_f() -> vector<i1> {
   %0 = vector.constant_mask [0] : vector<i1>
   return %0 : vector<i1>
@@ -2810,6 +2878,10 @@ func.func @negative_constant_mask_2d_leading_scalable() -> vector<[4]x4xi1> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.create_mask
+//===----------------------------------------------------------------------===//
+
 func.func @create_mask_0d(%a : index) -> vector<i1> {
   %v = vector.create_mask %a : vector<i1>
   return %v: vector<i1>
@@ -2858,6 +2930,10 @@ func.func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.transpose
+//===----------------------------------------------------------------------===//
+
 func.func @transpose_0d(%arg0: vector<f32>) -> vector<f32> {
   %0 = vector.transpose %arg0, [] : vector<f32> to vector<f32>
   return %0 : vector<f32>
@@ -2869,6 +2945,10 @@ func.func @transpose_0d(%arg0: vector<f32>) -> vector<f32> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.flat_transpose
+//===----------------------------------------------------------------------===//
+
 func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
   %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
      : vector<16xf32> -> vector<16xf32>
@@ -2900,12 +2980,29 @@ func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
 
 // -----
 
-func.func @vector_load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+//===----------------------------------------------------------------------===//
+// vector.load
+//===----------------------------------------------------------------------===//
+
+func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
+  %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
+     : vector<16xf32> -> vector<16xf32>
+  return %0 : vector<16xf32>
+}
+
+// CHECK-LABEL: func @flat_transpose
+// CHECK-SAME:  %[[A:.*]]: vector<16xf32>
+// CHECK:       %[[T:.*]] = llvm.intr.matrix.transpose %[[A]]
+// CHECK-SAME:      {columns = 4 : i32, rows = 4 : i32} :
+// CHECK-SAME:      vector<16xf32> into vector<16xf32>
+// CHECK:       return %[[T]] : vector<16xf32>
+
+func.func @load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
 
-// CHECK-LABEL: func @vector_load
+// CHECK-LABEL: func @load
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2914,12 +3011,12 @@ func.func @vector_load(%memref : memref<200x100xf32>, %i : index, %j : index) ->
 
 // -----
 
-func.func @vector_load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
+func.func @load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<[8]xf32>
   return %0 : vector<[8]xf32>
 }
 
-// CHECK-LABEL: func @vector_load_scalable
+// CHECK-LABEL: func @load_scalable
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2928,12 +3025,12 @@ func.func @vector_load_scalable(%memref : memref<200x100xf32>, %i : index, %j :
 
 // -----
 
-func.func @vector_load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+func.func @load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
 
-// CHECK-LABEL: func @vector_load_nontemporal
+// CHECK-LABEL: func @load_nontemporal
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2942,12 +3039,12 @@ func.func @vector_load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j
 
 // -----
 
-func.func @vector_load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
+func.func @load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
   %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[8]xf32>
   return %0 : vector<[8]xf32>
 }
 
-// CHECK-LABEL: func @vector_load_nontemporal_scalable
+// CHECK-LABEL: func @load_nontemporal_scalable
 // C...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/118112


More information about the Mlir-commits mailing list