[Mlir-commits] [mlir] [mlir][vector][nfc] Update vector-to-llvm.mlir (PR #118112)

Andrzej WarzyƄski llvmlistbot at llvm.org
Fri Dec 6 00:59:37 PST 2024


https://github.com/banach-space updated https://github.com/llvm/llvm-project/pull/118112

From a543bd6724d4f4f5f11cc2dab13add33605df693 Mon Sep 17 00:00:01 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Fri, 29 Nov 2024 16:39:27 +0000
Subject: [PATCH 1/2] [mlir][vector][nfc] Update vector-to-llvm.mlir

* Adds extra comments to group Ops
* Unifies the test function naming, i.e.
  * `@vector_{op_name}_{variant}` -> `@{op_name}_{variant}`
* Unifies input variable names (`%input` -> `%arg0`)

There are still some inconsistencies within this file - I'm happy to send
more updates if folks find it useful, but I'd definitely recommend
splitting the work across multiple PRs (otherwise it's hard to review).
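
As a rough illustration of the naming convention described above (a
hypothetical example, not copied verbatim from the diff below):

  // Before: `vector_` prefix in the test name, generic `%input` argument.
  func.func @vector_bitcast_example(%input: vector<4xf32>) -> vector<4xi32> {
    %0 = vector.bitcast %input : vector<4xf32> to vector<4xi32>
    return %0 : vector<4xi32>
  }

  // After: prefix dropped, input renamed to `%arg0`.
  func.func @bitcast_example(%arg0: vector<4xf32>) -> vector<4xi32> {
    %0 = vector.bitcast %arg0 : vector<4xf32> to vector<4xi32>
    return %0 : vector<4xi32>
  }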
---
 .../VectorToLLVM/vector-to-llvm.mlir          | 336 +++++++++++++-----
 1 file changed, 244 insertions(+), 92 deletions(-)

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 1c42538cf85912..fe69b1a076f9f8 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1,79 +1,81 @@
 // RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
 
-// TODO: Add tests for for vector.type_cast that would cover scalable vectors
+//===----------------------------------------------------------------------===//
+// vector.bticast
+//===----------------------------------------------------------------------===//
 
-func.func @bitcast_f32_to_i32_vector_0d(%input: vector<f32>) -> vector<i32> {
-  %0 = vector.bitcast %input : vector<f32> to vector<i32>
+func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector<f32>) -> vector<i32> {
+  %0 = vector.bitcast %arg0 : vector<f32> to vector<i32>
   return %0 : vector<i32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector_0d
-// CHECK-SAME:  %[[input:.*]]: vector<f32>
-// CHECK:       %[[vec_f32_1d:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<f32> to vector<1xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<f32>
+// CHECK:       %[[vec_f32_1d:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<f32> to vector<1xf32>
 // CHECK:       %[[vec_i32_1d:.*]] = llvm.bitcast %[[vec_f32_1d]] : vector<1xf32> to vector<1xi32>
 // CHECK:       %[[vec_i32_0d:.*]] = builtin.unrealized_conversion_cast %[[vec_i32_1d]] : vector<1xi32> to vector<i32>
 // CHECK:       return %[[vec_i32_0d]] : vector<i32>
 
 // -----
 
-func.func @bitcast_f32_to_i32_vector(%input: vector<16xf32>) -> vector<16xi32> {
-  %0 = vector.bitcast %input : vector<16xf32> to vector<16xi32>
+func.func @bitcast_f32_to_i32_vector(%arg0: vector<16xf32>) -> vector<16xi32> {
+  %0 = vector.bitcast %arg0 : vector<16xf32> to vector<16xi32>
   return %0 : vector<16xi32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector
-// CHECK-SAME:  %[[input:.*]]: vector<16xf32>
-// CHECK:       llvm.bitcast %[[input]] : vector<16xf32> to vector<16xi32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<16xf32>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<16xf32> to vector<16xi32>
 
-func.func @bitcast_f32_to_i32_vector_scalable(%input: vector<[16]xf32>) -> vector<[16]xi32> {
-  %0 = vector.bitcast %input : vector<[16]xf32> to vector<[16]xi32>
+func.func @bitcast_f32_to_i32_vector_scalable(%arg0: vector<[16]xf32>) -> vector<[16]xi32> {
+  %0 = vector.bitcast %arg0 : vector<[16]xf32> to vector<[16]xi32>
   return %0 : vector<[16]xi32>
 }
 
 // CHECK-LABEL: @bitcast_f32_to_i32_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[16]xf32>
-// CHECK:       llvm.bitcast %[[input]] : vector<[16]xf32> to vector<[16]xi32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[16]xf32>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<[16]xf32> to vector<[16]xi32>
 
 // -----
 
-func.func @bitcast_i8_to_f32_vector(%input: vector<64xi8>) -> vector<16xf32> {
-  %0 = vector.bitcast %input : vector<64xi8> to vector<16xf32>
+func.func @bitcast_i8_to_f32_vector(%arg0: vector<64xi8>) -> vector<16xf32> {
+  %0 = vector.bitcast %arg0 : vector<64xi8> to vector<16xf32>
   return %0 : vector<16xf32>
 }
 
 // CHECK-LABEL: @bitcast_i8_to_f32_vector
-// CHECK-SAME:  %[[input:.*]]: vector<64xi8>
-// CHECK:       llvm.bitcast %[[input]] : vector<64xi8> to vector<16xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<64xi8>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<64xi8> to vector<16xf32>
 
-func.func @bitcast_i8_to_f32_vector_scalable(%input: vector<[64]xi8>) -> vector<[16]xf32> {
-  %0 = vector.bitcast %input : vector<[64]xi8> to vector<[16]xf32>
+func.func @bitcast_i8_to_f32_vector_scalable(%arg0: vector<[64]xi8>) -> vector<[16]xf32> {
+  %0 = vector.bitcast %arg0 : vector<[64]xi8> to vector<[16]xf32>
   return %0 : vector<[16]xf32>
 }
 
 // CHECK-LABEL: @bitcast_i8_to_f32_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[64]xi8>
-// CHECK:       llvm.bitcast %[[input]] : vector<[64]xi8> to vector<[16]xf32>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[64]xi8>
+// CHECK:       llvm.bitcast %[[ARG_0]] : vector<[64]xi8> to vector<[16]xf32>
 
 // -----
 
-func.func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
-  %0 = vector.bitcast %input : vector<16xindex> to vector<128xi8>
+func.func @bitcast_index_to_i8_vector(%arg0: vector<16xindex>) -> vector<128xi8> {
+  %0 = vector.bitcast %arg0 : vector<16xindex> to vector<128xi8>
   return %0 : vector<128xi8>
 }
 
 // CHECK-LABEL: @bitcast_index_to_i8_vector
-// CHECK-SAME:  %[[input:.*]]: vector<16xindex>
-// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<16xindex> to vector<16xi64>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<16xindex>
+// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<16xindex> to vector<16xi64>
 // CHECK:       llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8>
 
-func.func @bitcast_index_to_i8_vector_scalable(%input: vector<[16]xindex>) -> vector<[128]xi8> {
-  %0 = vector.bitcast %input : vector<[16]xindex> to vector<[128]xi8>
+func.func @bitcast_index_to_i8_vector_scalable(%arg0: vector<[16]xindex>) -> vector<[128]xi8> {
+  %0 = vector.bitcast %arg0 : vector<[16]xindex> to vector<[128]xi8>
   return %0 : vector<[128]xi8>
 }
 
 // CHECK-LABEL: @bitcast_index_to_i8_vector_scalable
-// CHECK-SAME:  %[[input:.*]]: vector<[16]xindex>
-// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[input]] : vector<[16]xindex> to vector<[16]xi64>
+// CHECK-SAME:  %[[ARG_0:.*]]: vector<[16]xindex>
+// CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<[16]xindex> to vector<[16]xi64>
 // CHECK:       llvm.bitcast %[[T0]] : vector<[16]xi64> to vector<[128]xi8>
 
 // -----
@@ -110,6 +112,10 @@ func.func @bitcast_2d_scalable(%arg0: vector<2x[4]xi32>) -> vector<2x[2]xi64> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.broadcast
+//===----------------------------------------------------------------------===//
+
 func.func @broadcast_vec0d_from_f32(%arg0: f32) -> vector<f32> {
   %0 = vector.broadcast %arg0 : f32 to vector<f32>
   return %0 : vector<f32>
@@ -610,6 +616,10 @@ func.func @broadcast_stretch_in_middle_scalable_v2(%arg0: vector<[4]x1x2xf32>) -
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.outerproduct
+//===----------------------------------------------------------------------===//
+
 func.func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
@@ -758,6 +768,10 @@ func.func @outerproduct_add_scalable(%arg0: vector<2xf32>, %arg1: vector<[3]xf32
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.mask { vector.outerproduct }
+//===----------------------------------------------------------------------===//
+
 func.func @masked_float_add_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: vector<2xf32>, %m: vector<2xi1>) -> vector<2xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<add>} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32>
   return %0 : vector<2xf32>
@@ -996,6 +1010,10 @@ func.func @masked_int_or_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32,
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.shuffle
+//===----------------------------------------------------------------------===//
+
 func.func @shuffle_0D_direct(%arg0: vector<f32>) -> vector<3xf32> {
   %1 = vector.shuffle %arg0, %arg0 [0, 1, 0] : vector<f32>, vector<f32>
   return %1 : vector<3xf32>
@@ -1083,6 +1101,10 @@ func.func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf3
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extractelement
+//===----------------------------------------------------------------------===//
+
 func.func @extractelement_from_vec_0d_f32(%arg0: vector<f32>) -> f32 {
   %1 = vector.extractelement %arg0[] : vector<f32>
   return %1 : f32
@@ -1142,6 +1164,10 @@ func.func @extractelement_from_vec_1d_f32_idx_as_index_scalable(%arg0: vector<[1
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extract
+//===----------------------------------------------------------------------===//
+
 func.func @extract_scalar_from_vec_1d_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.extract %arg0[15]: f32 from vector<16xf32>
   return %0 : f32
@@ -1312,6 +1338,10 @@ func.func @extract_scalar_from_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insertelement
+//===----------------------------------------------------------------------===//
+
 func.func @insertelement_into_vec_0d_f32(%arg0: f32, %arg1: vector<f32>) -> vector<f32> {
   %1 = vector.insertelement %arg0, %arg1[] : vector<f32>
   return %1 : vector<f32>
@@ -1379,6 +1409,10 @@ func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable(%arg0: f
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insert
+//===----------------------------------------------------------------------===//
+
 func.func @insert_scalar_into_vec_1d_f32(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32>
   return %0 : vector<4xf32>
@@ -1538,6 +1572,12 @@ func.func @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[1
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.type_cast
+//
+// TODO: Add tests for for vector.type_cast that would cover scalable vectors
+//===----------------------------------------------------------------------===//
+
 func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
   return %0 : memref<vector<8x8x8xf32>>
@@ -1569,11 +1609,11 @@ func.func @type_cast_index(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8xin
 
 // -----
 
-func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
+func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
   return %0 : memref<vector<8x8x8xf32>, 3>
 }
-// CHECK-LABEL: @vector_type_cast_non_zero_addrspace
+// CHECK-LABEL: @type_cast_non_zero_addrspace
 //       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64)>
 //       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:   llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64)>
@@ -1586,6 +1626,10 @@ func.func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> m
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.print
+//===----------------------------------------------------------------------===//
+
 func.func @print_scalar_i1(%arg0: i1) {
   vector.print %arg0 : i1
   return
@@ -1772,6 +1816,10 @@ func.func @print_string() {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.extract_strided_slice
+//===----------------------------------------------------------------------===//
+
 func.func @extract_strided_slice_f32_1d_from_1d(%arg0: vector<4xf32>) -> vector<2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>
   return %0 : vector<2xf32>
@@ -1872,6 +1920,10 @@ func.func @extract_strided_slice_f32_2d_from_2d_scalable(%arg0: vector<4x[8]xf32
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.insert_strided_slice
+//===----------------------------------------------------------------------===//
+
 func.func @insert_strided_slice_f32_2d_into_3d(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vector<4x4x4xf32> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xf32> into vector<4x4x4xf32>
   return %0 : vector<4x4x4xf32>
@@ -1998,8 +2050,12 @@ func.func @insert_strided_slice_f32_2d_into_3d_scalable(%arg0: vector<2x[4]xf32>
 
 // -----
 
-func.func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>, %d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
-  // CHECK-LABEL: @vector_fma
+//===----------------------------------------------------------------------===//
+// vector.fma
+//===----------------------------------------------------------------------===//
+
+func.func @fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>, %d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
+  // CHECK-LABEL: @fma
   //  CHECK-SAME: %[[A:.*]]: vector<8xf32>
   //  CHECK-SAME: %[[B:.*]]: vector<2x4xf32>
   //  CHECK-SAME: %[[C:.*]]: vector<1x1x1xf32>
@@ -2033,8 +2089,8 @@ func.func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf
   return %0, %1, %2, %3: vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>
 }
 
-func.func @vector_fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: vector<1x1x[1]xf32>, %d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
-  // CHECK-LABEL: @vector_fma_scalable
+func.func @fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: vector<1x1x[1]xf32>, %d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
+  // CHECK-LABEL: @fma_scalable
   //  CHECK-SAME: %[[A:.*]]: vector<[8]xf32>
   //  CHECK-SAME: %[[B:.*]]: vector<2x[4]xf32>
   //  CHECK-SAME: %[[C:.*]]: vector<1x1x[1]xf32>
@@ -2066,6 +2122,10 @@ func.func @vector_fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: v
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.reduction
+//===----------------------------------------------------------------------===//
+
 func.func @reduce_0d_f32(%arg0: vector<f32>) -> f32 {
   %0 = vector.reduction <add>, %arg0 : vector<f32> into f32
   return %0 : f32
@@ -2691,6 +2751,10 @@ func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index {
 //                          4x16                16x3               4x3
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.matrix_multiply
+//===----------------------------------------------------------------------===//
+
 func.func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
   %C = vector.matrix_multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
@@ -2717,6 +2781,10 @@ func.func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vecto
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.constant_mask
+//===----------------------------------------------------------------------===//
+
 func.func @constant_mask_0d_f() -> vector<i1> {
   %0 = vector.constant_mask [0] : vector<i1>
   return %0 : vector<i1>
@@ -2810,6 +2878,10 @@ func.func @negative_constant_mask_2d_leading_scalable() -> vector<[4]x4xi1> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.create_mask
+//===----------------------------------------------------------------------===//
+
 func.func @create_mask_0d(%a : index) -> vector<i1> {
   %v = vector.create_mask %a : vector<i1>
   return %v: vector<i1>
@@ -2858,6 +2930,10 @@ func.func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.transpose
+//===----------------------------------------------------------------------===//
+
 func.func @transpose_0d(%arg0: vector<f32>) -> vector<f32> {
   %0 = vector.transpose %arg0, [] : vector<f32> to vector<f32>
   return %0 : vector<f32>
@@ -2869,6 +2945,10 @@ func.func @transpose_0d(%arg0: vector<f32>) -> vector<f32> {
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.flat_transpose
+//===----------------------------------------------------------------------===//
+
 func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
   %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
      : vector<16xf32> -> vector<16xf32>
@@ -2900,12 +2980,29 @@ func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
 
 // -----
 
-func.func @vector_load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+//===----------------------------------------------------------------------===//
+// vector.load
+//===----------------------------------------------------------------------===//
+
+func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
+  %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
+     : vector<16xf32> -> vector<16xf32>
+  return %0 : vector<16xf32>
+}
+
+// CHECK-LABEL: func @flat_transpose
+// CHECK-SAME:  %[[A:.*]]: vector<16xf32>
+// CHECK:       %[[T:.*]] = llvm.intr.matrix.transpose %[[A]]
+// CHECK-SAME:      {columns = 4 : i32, rows = 4 : i32} :
+// CHECK-SAME:      vector<16xf32> into vector<16xf32>
+// CHECK:       return %[[T]] : vector<16xf32>
+
+func.func @load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
 
-// CHECK-LABEL: func @vector_load
+// CHECK-LABEL: func @load
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2914,12 +3011,12 @@ func.func @vector_load(%memref : memref<200x100xf32>, %i : index, %j : index) ->
 
 // -----
 
-func.func @vector_load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
+func.func @load_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<[8]xf32>
   return %0 : vector<[8]xf32>
 }
 
-// CHECK-LABEL: func @vector_load_scalable
+// CHECK-LABEL: func @load_scalable
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2928,12 +3025,12 @@ func.func @vector_load_scalable(%memref : memref<200x100xf32>, %i : index, %j :
 
 // -----
 
-func.func @vector_load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+func.func @load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
 
-// CHECK-LABEL: func @vector_load_nontemporal
+// CHECK-LABEL: func @load_nontemporal
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2942,12 +3039,12 @@ func.func @vector_load_nontemporal(%memref : memref<200x100xf32>, %i : index, %j
 
 // -----
 
-func.func @vector_load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
+func.func @load_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<[8]xf32> {
   %0 = vector.load %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[8]xf32>
   return %0 : vector<[8]xf32>
 }
 
-// CHECK-LABEL: func @vector_load_nontemporal_scalable
+// CHECK-LABEL: func @load_nontemporal_scalable
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -2956,34 +3053,34 @@ func.func @vector_load_nontemporal_scalable(%memref : memref<200x100xf32>, %i :
 
 // -----
 
-func.func @vector_load_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> {
+func.func @load_index(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<8xindex> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<8xindex>
   return %0 : vector<8xindex>
 }
-// CHECK-LABEL: func @vector_load_index
+// CHECK-LABEL: func @load_index
 // CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xi64>
 // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<8xi64> to vector<8xindex>
 // CHECK: return %[[T1]] : vector<8xindex>
 
 // -----
 
-func.func @vector_load_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<[8]xindex> {
+func.func @load_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) -> vector<[8]xindex> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xindex>, vector<[8]xindex>
   return %0 : vector<[8]xindex>
 }
-// CHECK-LABEL: func @vector_load_index_scalable
+// CHECK-LABEL: func @load_index_scalable
 // CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<[8]xi64>
 // CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<[8]xi64> to vector<[8]xindex>
 // CHECK: return %[[T1]] : vector<[8]xindex>
 
 // -----
 
-func.func @vector_load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<f32> {
+func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<f32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<f32>
   return %0 : vector<f32>
 }
 
-// CHECK-LABEL: func @vector_load_0d
+// CHECK-LABEL: func @load_0d
 // CHECK: %[[load:.*]] = memref.load %{{.*}}[%{{.*}}, %{{.*}}]
 // CHECK: %[[vec:.*]] = llvm.mlir.undef : vector<1xf32>
 // CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
@@ -2993,14 +3090,17 @@ func.func @vector_load_0d(%memref : memref<200x100xf32>, %i : index, %j : index)
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.store
+//===----------------------------------------------------------------------===//
 
-func.func @vector_store(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @store(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<4xf32>
   vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<4xf32>
   return
 }
 
-// CHECK-LABEL: func @vector_store
+// CHECK-LABEL: func @store
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -3009,13 +3109,13 @@ func.func @vector_store(%memref : memref<200x100xf32>, %i : index, %j : index) {
 
 // -----
 
-func.func @vector_store_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @store_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<[4]xf32>
   vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<[4]xf32>
   return
 }
 
-// CHECK-LABEL: func @vector_store_scalable
+// CHECK-LABEL: func @store_scalable
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -3024,13 +3124,13 @@ func.func @vector_store_scalable(%memref : memref<200x100xf32>, %i : index, %j :
 
 // -----
 
-func.func @vector_store_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @store_nontemporal(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<4xf32>
   vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<4xf32>
   return
 }
 
-// CHECK-LABEL: func @vector_store_nontemporal
+// CHECK-LABEL: func @store_nontemporal
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -3039,13 +3139,13 @@ func.func @vector_store_nontemporal(%memref : memref<200x100xf32>, %i : index, %
 
 // -----
 
-func.func @vector_store_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @store_nontemporal_scalable(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<[4]xf32>
   vector.store %val, %memref[%i, %j] {nontemporal = true} : memref<200x100xf32>, vector<[4]xf32>
   return
 }
 
-// CHECK-LABEL: func @vector_store_nontemporal_scalable
+// CHECK-LABEL: func @store_nontemporal_scalable
 // CHECK: %[[c100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]]  : i64
 // CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}}  : i64
@@ -3054,33 +3154,33 @@ func.func @vector_store_nontemporal_scalable(%memref : memref<200x100xf32>, %i :
 
 // -----
 
-func.func @vector_store_index(%memref : memref<200x100xindex>, %i : index, %j : index) {
+func.func @store_index(%memref : memref<200x100xindex>, %i : index, %j : index) {
   %val = arith.constant dense<11> : vector<4xindex>
   vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<4xindex>
   return
 }
-// CHECK-LABEL: func @vector_store_index
+// CHECK-LABEL: func @store_index
 // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xi64>, !llvm.ptr
 
 // -----
 
-func.func @vector_store_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) {
+func.func @store_index_scalable(%memref : memref<200x100xindex>, %i : index, %j : index) {
   %val = arith.constant dense<11> : vector<[4]xindex>
   vector.store %val, %memref[%i, %j] : memref<200x100xindex>, vector<[4]xindex>
   return
 }
-// CHECK-LABEL: func @vector_store_index_scalable
+// CHECK-LABEL: func @store_index_scalable
 // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<[4]xi64>, !llvm.ptr
 
 // -----
 
-func.func @vector_store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
+func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
   %val = arith.constant dense<11.0> : vector<f32>
   vector.store %val, %memref[%i, %j] : memref<200x100xf32>, vector<f32>
   return
 }
 
-// CHECK-LABEL: func @vector_store_0d
+// CHECK-LABEL: func @store_0d
 // CHECK: %[[val:.*]] = arith.constant dense<1.100000e+01> : vector<f32>
 // CHECK: %[[cast:.*]] = builtin.unrealized_conversion_cast %[[val]] : vector<f32> to vector<1xf32>
 // CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -3089,6 +3189,10 @@ func.func @vector_store_0d(%memref : memref<200x100xf32>, %i : index, %j : index
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.maskedload
+//===----------------------------------------------------------------------===//
+
 func.func @masked_load(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
   %c0 = arith.constant 0: index
   %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
@@ -3139,6 +3243,10 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.maskedstore
+//===----------------------------------------------------------------------===//
+
 func.func @masked_store(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
   %c0 = arith.constant 0: index
   vector.maskedstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32>
@@ -3187,6 +3295,10 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.gather
+//===----------------------------------------------------------------------===//
+
 func.func @gather(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
   %0 = arith.constant 0: index
   %1 = vector.gather %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32>
@@ -3398,6 +3510,10 @@ func.func @gather_1d_from_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]x
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.scatter
+//===----------------------------------------------------------------------===//
+
 func.func @scatter(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) {
   %0 = arith.constant 0: index
   vector.scatter %arg0[%0][%arg1], %arg2, %arg3 : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32>
@@ -3472,6 +3588,10 @@ func.func @scatter_1d_into_2d_scalable(%arg0: memref<4x?xf32>, %arg1: vector<[4]
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.expandload
+//===----------------------------------------------------------------------===//
+
 func.func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> {
   %c0 = arith.constant 0: index
   %0 = vector.expandload %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<11xi1>, vector<11xf32> into vector<11xf32>
@@ -3497,6 +3617,10 @@ func.func @expand_load_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>, %a
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.compressstore
+//===----------------------------------------------------------------------===//
+
 func.func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) {
   %c0 = arith.constant 0: index
   vector.compressstore %arg0[%c0], %arg1, %arg2 : memref<?xf32>, vector<11xi1>, vector<11xf32>
@@ -3521,6 +3645,10 @@ func.func @compress_store_op_index(%arg0: memref<?xindex>, %arg1: vector<11xi1>,
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.splat
+//===----------------------------------------------------------------------===//
+
 // CHECK-LABEL: @splat_0d
 // CHECK-SAME: %[[ARG:.*]]: f32
 func.func @splat_0d(%a: f32) -> vector<f32> {
@@ -3569,9 +3697,13 @@ func.func @splat_scalable(%a: vector<[4]xf32>, %b: f32) -> vector<[4]xf32> {
 
 // -----
 
-// CHECK-LABEL: @vector_scalable_insert
+//===----------------------------------------------------------------------===//
+// vector.scalable_insert
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @scalable_insert
 // CHECK-SAME: %[[SUB:.*]]: vector<4xf32>, %[[SV:.*]]: vector<[4]xf32>
-func.func @vector_scalable_insert(%sub: vector<4xf32>, %dsv: vector<[4]xf32>) -> vector<[4]xf32> {
+func.func @scalable_insert(%sub: vector<4xf32>, %dsv: vector<[4]xf32>) -> vector<[4]xf32> {
   // CHECK-NEXT: %[[TMP:.*]] = llvm.intr.vector.insert %[[SUB]], %[[SV]][0] : vector<4xf32> into vector<[4]xf32>
   %0 = vector.scalable.insert %sub, %dsv[0] : vector<4xf32> into vector<[4]xf32>
   // CHECK-NEXT: llvm.intr.vector.insert %[[SUB]], %[[TMP]][4] : vector<4xf32> into vector<[4]xf32>
@@ -3581,9 +3713,13 @@ func.func @vector_scalable_insert(%sub: vector<4xf32>, %dsv: vector<[4]xf32>) ->
 
 // -----
 
-// CHECK-LABEL: @vector_scalable_extract
+//===----------------------------------------------------------------------===//
+// vector.scalable_extract
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @scalable_extract
 // CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32>
-func.func @vector_scalable_extract(%vec: vector<[4]xf32>) -> vector<8xf32> {
+func.func @scalable_extract(%vec: vector<[4]xf32>) -> vector<8xf32> {
   // CHECK-NEXT: %{{.*}} = llvm.intr.vector.extract %[[VEC]][0] : vector<8xf32> from vector<[4]xf32>
   %0 = vector.scalable.extract %vec[0] : vector<8xf32> from vector<[4]xf32>
   return %0 : vector<8xf32>
@@ -3591,9 +3727,13 @@ func.func @vector_scalable_extract(%vec: vector<[4]xf32>) -> vector<8xf32> {
 
 // -----
 
-// CHECK-LABEL: @vector_interleave_0d
+//===----------------------------------------------------------------------===//
+// vector.interleave
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @interleave_0d
 //  CHECK-SAME:     %[[LHS:.*]]: vector<i8>, %[[RHS:.*]]: vector<i8>)
-func.func @vector_interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8> {
+func.func @interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8> {
   // CHECK-DAG: %[[LHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[LHS]] : vector<i8> to vector<1xi8>
   // CHECK-DAG: %[[RHS_RANK1:.*]] = builtin.unrealized_conversion_cast %[[RHS]] : vector<i8> to vector<1xi8>
   // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS_RANK1]], %[[RHS_RANK1]] [0, 1] : vector<1xi8>
@@ -3604,9 +3744,9 @@ func.func @vector_interleave_0d(%a: vector<i8>, %b: vector<i8>) -> vector<2xi8>
 
 // -----
 
-// CHECK-LABEL: @vector_interleave_1d
+// CHECK-LABEL: @interleave_1d
 //  CHECK-SAME:     %[[LHS:.*]]: vector<8xf32>, %[[RHS:.*]]: vector<8xf32>)
-func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32> {
+func.func @interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<16xf32> {
   // CHECK: %[[ZIP:.*]] = llvm.shufflevector %[[LHS]], %[[RHS]] [0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15] : vector<8xf32>
   // CHECK: return %[[ZIP]]
   %0 = vector.interleave %a, %b : vector<8xf32> -> vector<16xf32>
@@ -3615,9 +3755,9 @@ func.func @vector_interleave_1d(%a: vector<8xf32>, %b: vector<8xf32>) -> vector<
 
 // -----
 
-// CHECK-LABEL: @vector_interleave_1d_scalable
+// CHECK-LABEL: @interleave_1d_scalable
 //  CHECK-SAME:     %[[LHS:.*]]: vector<[4]xi32>, %[[RHS:.*]]: vector<[4]xi32>)
-func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32> {
+func.func @interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32>) -> vector<[8]xi32> {
   // CHECK: %[[ZIP:.*]] = "llvm.intr.vector.interleave2"(%[[LHS]], %[[RHS]]) : (vector<[4]xi32>, vector<[4]xi32>) -> vector<[8]xi32>
   // CHECK: return %[[ZIP]]
   %0 = vector.interleave %a, %b : vector<[4]xi32> -> vector<[8]xi32>
@@ -3626,9 +3766,9 @@ func.func @vector_interleave_1d_scalable(%a: vector<[4]xi32>, %b: vector<[4]xi32
 
 // -----
 
-// CHECK-LABEL: @vector_interleave_2d
+// CHECK-LABEL: @interleave_2d
 //  CHECK-SAME:     %[[LHS:.*]]: vector<2x3xi8>, %[[RHS:.*]]: vector<2x3xi8>)
-func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8> {
+func.func @interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vector<2x6xi8> {
   // CHECK: llvm.shufflevector
   // CHECK-NOT: vector.interleave {{.*}} : vector<2x3xi8>
   %0 = vector.interleave %a, %b : vector<2x3xi8> -> vector<2x6xi8>
@@ -3637,9 +3777,9 @@ func.func @vector_interleave_2d(%a: vector<2x3xi8>, %b: vector<2x3xi8>) -> vecto
 
 // -----
 
-// CHECK-LABEL: @vector_interleave_2d_scalable
+// CHECK-LABEL: @interleave_2d_scalable
 //  CHECK-SAME:     %[[LHS:.*]]: vector<2x[8]xi16>, %[[RHS:.*]]: vector<2x[8]xi16>)
-func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16> {
+func.func @interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]xi16>) -> vector<2x[16]xi16> {
   // CHECK: llvm.intr.vector.interleave2
   // CHECK-NOT: vector.interleave {{.*}} : vector<2x[8]xi16>
   %0 = vector.interleave %a, %b : vector<2x[8]xi16> -> vector<2x[16]xi16>
@@ -3648,9 +3788,13 @@ func.func @vector_interleave_2d_scalable(%a: vector<2x[8]xi16>, %b: vector<2x[8]
 
 // -----
 
-// CHECK-LABEL: @vector_deinterleave_1d
+//===----------------------------------------------------------------------===//
+// vector.deinterleave
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @deinterleave_1d
 // CHECK-SAME:  (%[[SRC:.*]]: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>)
-func.func @vector_deinterleave_1d(%a: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) {
+func.func @deinterleave_1d(%a: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>) {
   // CHECK: %[[POISON:.*]] = llvm.mlir.poison : vector<4xi32>
   // CHECK: llvm.shufflevector %[[SRC]], %[[POISON]] [0, 2] : vector<4xi32>
   // CHECK: llvm.shufflevector %[[SRC]], %[[POISON]] [1, 3] : vector<4xi32>
@@ -3658,9 +3802,9 @@ func.func @vector_deinterleave_1d(%a: vector<4xi32>) -> (vector<2xi32>, vector<2
   return %0, %1 : vector<2xi32>, vector<2xi32>
 }
 
-// CHECK-LABEL: @vector_deinterleave_1d_scalable
+// CHECK-LABEL: @deinterleave_1d_scalable
 // CHECK-SAME:  %[[SRC:.*]]: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>)
-func.func @vector_deinterleave_1d_scalable(%a: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) {
+func.func @deinterleave_1d_scalable(%a: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) {
     // CHECK: %[[RES:.*]] = "llvm.intr.vector.deinterleave2"(%[[SRC]]) : (vector<[4]xi32>) -> !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)>
     // CHECK: llvm.extractvalue %[[RES]][0] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)>
     // CHECK: llvm.extractvalue %[[RES]][1] : !llvm.struct<(vector<[2]xi32>, vector<[2]xi32>)>
@@ -3668,16 +3812,16 @@ func.func @vector_deinterleave_1d_scalable(%a: vector<[4]xi32>) -> (vector<[2]xi
     return %0, %1 : vector<[2]xi32>, vector<[2]xi32>
 }
 
-// CHECK-LABEL: @vector_deinterleave_2d
+// CHECK-LABEL: @deinterleave_2d
 // CHECK-SAME: %[[SRC:.*]]: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>)
-func.func @vector_deinterleave_2d(%a: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>) {
+func.func @deinterleave_2d(%a: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>) {
   // CHECK: llvm.shufflevector
   // CHECK-NOT: vector.deinterleave %{{.*}} : vector<2x8xf32>
   %0, %1 = vector.deinterleave %a : vector<2x8xf32> -> vector<2x4xf32>
   return %0, %1 : vector<2x4xf32>, vector<2x4xf32>
 }
 
-func.func @vector_deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[4]xf32>, vector<2x[4]xf32>) {
+func.func @deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[4]xf32>, vector<2x[4]xf32>) {
     // CHECK: llvm.intr.vector.deinterleave2
     // CHECK-NOT: vector.deinterleave %{{.*}} : vector<2x[8]xf32>
     %0, %1 = vector.deinterleave %a : vector<2x[8]xf32> -> vector<2x[4]xf32>
@@ -3686,7 +3830,11 @@ func.func @vector_deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[
 
 // -----
 
-// CHECK-LABEL: func.func @vector_from_elements_1d(
+//===----------------------------------------------------------------------===//
+// vector.from_elements
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: func.func @from_elements_1d(
 //  CHECK-SAME:     %[[a:.*]]: f32, %[[b:.*]]: f32)
 //       CHECK:   %[[undef:.*]] = llvm.mlir.undef : vector<3xf32>
 //       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : i64) : i64
@@ -3696,42 +3844,46 @@ func.func @vector_deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[
 //       CHECK:   %[[c2:.*]] = llvm.mlir.constant(2 : i64) : i64
 //       CHECK:   %[[insert2:.*]] = llvm.insertelement %[[a]], %[[insert1]][%[[c2]] : i64] : vector<3xf32>
 //       CHECK:   return %[[insert2]]
-func.func @vector_from_elements_1d(%a: f32, %b: f32) -> vector<3xf32> {
+func.func @from_elements_1d(%a: f32, %b: f32) -> vector<3xf32> {
   %0 = vector.from_elements %a, %b, %a : vector<3xf32>
   return %0 : vector<3xf32>
 }
 
 // -----
 
-// CHECK-LABEL: func.func @vector_from_elements_0d(
+// CHECK-LABEL: func.func @from_elements_0d(
 //  CHECK-SAME:     %[[a:.*]]: f32)
 //       CHECK:   %[[undef:.*]] = llvm.mlir.undef : vector<1xf32>
 //       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : i64) : i64
 //       CHECK:   %[[insert0:.*]] = llvm.insertelement %[[a]], %[[undef]][%[[c0]] : i64] : vector<1xf32>
 //       CHECK:   %[[cast:.*]] = builtin.unrealized_conversion_cast %[[insert0]] : vector<1xf32> to vector<f32>
 //       CHECK:   return %[[cast]]
-func.func @vector_from_elements_0d(%a: f32) -> vector<f32> {
+func.func @from_elements_0d(%a: f32) -> vector<f32> {
   %0 = vector.from_elements %a : vector<f32>
   return %0 : vector<f32>
 }
 
 // -----
 
-// CHECK-LABEL: @vector_step_scalable
+//===----------------------------------------------------------------------===//
+// vector.step
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @step_scalable
 // CHECK: %[[STEPVECTOR:.*]] = llvm.intr.stepvector : vector<[4]xi64>
 // CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex>
 // CHECK: return %[[CAST]] : vector<[4]xindex>
-func.func @vector_step_scalable() -> vector<[4]xindex> {
+func.func @step_scalable() -> vector<[4]xindex> {
   %0 = vector.step : vector<[4]xindex>
   return %0 : vector<[4]xindex>
 }
 
 // -----
 
-// CHECK-LABEL: @vector_step
+// CHECK-LABEL: @step
 // CHECK: %[[CST:.+]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
 // CHECK: return %[[CST]] : vector<4xindex>
-func.func @vector_step() -> vector<4xindex> {
+func.func @step() -> vector<4xindex> {
   %0 = vector.step : vector<4xindex>
   return %0 : vector<4xindex>
 }

From 60e1559fcace3888ad52f884541ad7e8f5096a81 Mon Sep 17 00:00:01 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Fri, 6 Dec 2024 08:55:31 +0000
Subject: [PATCH 2/2] fixup! [mlir][vector][nfc] Update vector-to-llvm.mlir

* Add missing "split" separators (`// -----`) - see the sketch below
* Capitalize LIT variables (e.g. `%[[insert]]` --> `%[[INSERT]]`)
* Rename some func variables to either better align with the convention in the
  file (e.g. `%a` -> `%vec_1d`) or to make the name more
  descriptive (e.g. `%a` -> `%num_elems`)
* Fix other typos highlighted by Hugo (thanks!)
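
As a rough sketch (a hypothetical test, not part of the diff below), the
split separators and capitalized LIT variables look like this under
-split-input-file:

  func.func @bitcast_example(%arg0: vector<4xf32>) -> vector<4xi32> {
    %0 = vector.bitcast %arg0 : vector<4xf32> to vector<4xi32>
    return %0 : vector<4xi32>
  }

  // CHECK-LABEL: @bitcast_example
  // CHECK-SAME:  %[[ARG_0:.*]]: vector<4xf32>
  // CHECK:       llvm.bitcast %[[ARG_0]] : vector<4xf32> to vector<4xi32>

  // -----

  func.func @bitcast_example_scalable(%arg0: vector<[4]xf32>) -> vector<[4]xi32> {
    %0 = vector.bitcast %arg0 : vector<[4]xf32> to vector<[4]xi32>
    return %0 : vector<[4]xi32>
  }

  // CHECK-LABEL: @bitcast_example_scalable
  // CHECK-SAME:  %[[ARG_0:.*]]: vector<[4]xf32>
  // CHECK:       llvm.bitcast %[[ARG_0]] : vector<[4]xf32> to vector<[4]xi32>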
---
 .../VectorToLLVM/vector-to-llvm.mlir          | 372 +++++++++++++-----
 1 file changed, 273 insertions(+), 99 deletions(-)

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index fe69b1a076f9f8..8d20d39592b04a 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// vector.bticast
+// vector.bitcast
 //===----------------------------------------------------------------------===//
 
 func.func @bitcast_f32_to_i32_vector_0d(%arg0: vector<f32>) -> vector<i32> {
@@ -23,10 +23,13 @@ func.func @bitcast_f32_to_i32_vector(%arg0: vector<16xf32>) -> vector<16xi32> {
   return %0 : vector<16xi32>
 }
 
+
 // CHECK-LABEL: @bitcast_f32_to_i32_vector
 // CHECK-SAME:  %[[ARG_0:.*]]: vector<16xf32>
 // CHECK:       llvm.bitcast %[[ARG_0]] : vector<16xf32> to vector<16xi32>
 
+// -----
+
 func.func @bitcast_f32_to_i32_vector_scalable(%arg0: vector<[16]xf32>) -> vector<[16]xi32> {
   %0 = vector.bitcast %arg0 : vector<[16]xf32> to vector<[16]xi32>
   return %0 : vector<[16]xi32>
@@ -47,6 +50,8 @@ func.func @bitcast_i8_to_f32_vector(%arg0: vector<64xi8>) -> vector<16xf32> {
 // CHECK-SAME:  %[[ARG_0:.*]]: vector<64xi8>
 // CHECK:       llvm.bitcast %[[ARG_0]] : vector<64xi8> to vector<16xf32>
 
+// -----
+
 func.func @bitcast_i8_to_f32_vector_scalable(%arg0: vector<[64]xi8>) -> vector<[16]xf32> {
   %0 = vector.bitcast %arg0 : vector<[64]xi8> to vector<[16]xf32>
   return %0 : vector<[16]xf32>
@@ -68,6 +73,8 @@ func.func @bitcast_index_to_i8_vector(%arg0: vector<16xindex>) -> vector<128xi8>
 // CHECK:       %[[T0:.*]] = builtin.unrealized_conversion_cast %[[ARG_0]] : vector<16xindex> to vector<16xi64>
 // CHECK:       llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8>
 
+// -----
+
 func.func @bitcast_index_to_i8_vector_scalable(%arg0: vector<[16]xindex>) -> vector<[128]xi8> {
   %0 = vector.bitcast %arg0 : vector<[16]xindex> to vector<[128]xi8>
   return %0 : vector<[128]xi8>
@@ -148,6 +155,7 @@ func.func @broadcast_vec1d_from_f32(%arg0: f32) -> vector<2xf32> {
 // CHECK:       %[[T1:.*]] = llvm.shufflevector %[[T0]]
 // CHECK:       return %[[T1]] : vector<2xf32>
 
+// -----
 
 func.func @broadcast_vec1d_from_f32_scalable(%arg0: f32) -> vector<[2]xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<[2]xf32>
@@ -173,6 +181,8 @@ func.func @broadcast_vec1d_from_index(%arg0: index) -> vector<2xindex> {
 // CHECK:       %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : vector<2xi64> to vector<2xindex>
 // CHECK:       return %[[T2]] : vector<2xindex>
 
+// -----
+
 func.func @broadcast_vec1d_from_index_scalable(%arg0: index) -> vector<[2]xindex> {
   %0 = vector.broadcast %arg0 : index to vector<[2]xindex>
   return %0 : vector<[2]xindex>
@@ -200,6 +210,8 @@ func.func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
 // CHECK:       %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T3]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
 // CHECK:       return %[[T4]] : vector<2x3xf32>
 
+// -----
+
 func.func @broadcast_vec2d_from_scalar_scalable(%arg0: f32) -> vector<2x[3]xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x[3]xf32>
   return %0 : vector<2x[3]xf32>
@@ -229,6 +241,7 @@ func.func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
 // CHECK:       %[[T4:.*]] = builtin.unrealized_conversion_cast %[[T3]] : !llvm.array<2 x array<3 x vector<4xf32>>> to vector<2x3x4xf32>
 // CHECK:       return %[[T4]] : vector<2x3x4xf32>
 
+// -----
 
 func.func @broadcast_vec3d_from_scalar_scalable(%arg0: f32) -> vector<2x3x[4]xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x3x[4]xf32>
@@ -254,6 +267,8 @@ func.func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> {
 // CHECK-SAME:  %[[A:.*]]: vector<2xf32>)
 // CHECK:       return %[[A]] : vector<2xf32>
 
+// -----
+
 func.func @broadcast_vec1d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector<[2]xf32> {
   %0 = vector.broadcast %arg0 : vector<[2]xf32> to vector<[2]xf32>
   return %0 : vector<[2]xf32>
@@ -299,6 +314,8 @@ func.func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
 // CHECK:       %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : !llvm.array<3 x vector<2xf32>> to vector<3x2xf32>
 // CHECK:       return %[[T5]] : vector<3x2xf32>
 
+// -----
+
 func.func @broadcast_vec2d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector<3x[2]xf32> {
   %0 = vector.broadcast %arg0 : vector<[2]xf32> to vector<3x[2]xf32>
   return %0 : vector<3x[2]xf32>
@@ -329,6 +346,8 @@ func.func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x
 // CHECK:       %[[T4:.*]] = builtin.unrealized_conversion_cast %{{.*}} : !llvm.array<3 x vector<2xi64>> to vector<3x2xindex>
 // CHECK:       return %[[T4]] : vector<3x2xindex>
 
+// -----
+
 func.func @broadcast_vec2d_from_index_vec1d_scalable(%arg0: vector<[2]xindex>) -> vector<3x[2]xindex> {
   %0 = vector.broadcast %arg0 : vector<[2]xindex> to vector<3x[2]xindex>
   return %0 : vector<3x[2]xindex>
@@ -368,6 +387,8 @@ func.func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32>
 // CHECK:       %[[T11:.*]] = builtin.unrealized_conversion_cast %[[T10]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
 // CHECK:       return %[[T11]] : vector<4x3x2xf32>
 
+// -----
+
 func.func @broadcast_vec3d_from_vec1d_scalable(%arg0: vector<[2]xf32>) -> vector<4x3x[2]xf32> {
   %0 = vector.broadcast %arg0 : vector<[2]xf32> to vector<4x3x[2]xf32>
   return %0 : vector<4x3x[2]xf32>
@@ -409,6 +430,8 @@ func.func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf3
 // CHECK:       %[[T10:.*]] = builtin.unrealized_conversion_cast %[[T9]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
 // CHECK:       return %[[T10]] : vector<4x3x2xf32>
 
+// -----
+
 func.func @broadcast_vec3d_from_vec2d_scalable(%arg0: vector<3x[2]xf32>) -> vector<4x3x[2]xf32> {
   %0 = vector.broadcast %arg0 : vector<3x[2]xf32> to vector<4x3x[2]xf32>
   return %0 : vector<4x3x[2]xf32>
@@ -440,6 +463,8 @@ func.func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
 // CHECK:       %[[T4:.*]] = llvm.shufflevector %[[T3]]
 // CHECK:       return %[[T4]] : vector<4xf32>
 
+// -----
+
 func.func @broadcast_stretch_scalable(%arg0: vector<1xf32>) -> vector<[4]xf32> {
   %0 = vector.broadcast %arg0 : vector<1xf32> to vector<[4]xf32>
   return %0 : vector<[4]xf32>
@@ -470,6 +495,8 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32>
 // CHECK:       %[[T8:.*]] = builtin.unrealized_conversion_cast %[[T7]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32>
 // CHECK:       return %[[T8]] : vector<3x4xf32>
 
+// -----
+
 func.func @broadcast_stretch_at_start_scalable(%arg0: vector<1x[4]xf32>) -> vector<3x[4]xf32> {
   %0 = vector.broadcast %arg0 : vector<1x[4]xf32> to vector<3x[4]xf32>
   return %0 : vector<3x[4]xf32>
@@ -570,6 +597,8 @@ func.func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2
 // CHECK:       %[[T32:.*]] = builtin.unrealized_conversion_cast %[[T31]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
 // CHECK:       return %[[T32]] : vector<4x3x2xf32>
 
+// -----
+
 func.func @broadcast_stretch_in_middle_scalable_v1(%arg0: vector<4x1x[2]xf32>) -> vector<4x3x[2]xf32> {
   %0 = vector.broadcast %arg0 : vector<4x1x[2]xf32> to vector<4x3x[2]xf32>
   return %0 : vector<4x3x[2]xf32>
@@ -604,6 +633,8 @@ func.func @broadcast_stretch_in_middle_scalable_v1(%arg0: vector<4x1x[2]xf32>) -
 // CHECK:       %[[T32:.*]] = builtin.unrealized_conversion_cast %[[T31]] : !llvm.array<4 x array<3 x vector<[2]xf32>>> to vector<4x3x[2]xf32>
 // CHECK:       return %[[T32]] : vector<4x3x[2]xf32>
 
+// -----
+
 // TODO: Add support for scalable vectors
 
 func.func @broadcast_stretch_in_middle_scalable_v2(%arg0: vector<[4]x1x2xf32>) -> vector<[4]x3x2xf32> {
@@ -644,6 +675,8 @@ func.func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x
 // CHECK:       %[[T14:.*]] = builtin.unrealized_conversion_cast %[[T13]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
 // CHECK:       return %[[T14]] : vector<2x3xf32>
 
+// -----
+
 func.func @outerproduct_scalable(%arg0: vector<2xf32>, %arg1: vector<[3]xf32>) -> vector<2x[3]xf32> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xf32>, vector<[3]xf32>
   return %2 : vector<2x[3]xf32>
@@ -689,6 +722,8 @@ func.func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) ->
 // CHECK:       %[[T7:.*]] = builtin.unrealized_conversion_cast %[[T6]] : vector<3xindex> to vector<3xi64>
 // CHECK:       %{{.*}} = llvm.insertvalue %[[T7]], %[[T8]][0] : !llvm.array<2 x vector<3xi64>>
 
+// -----
+
 func.func @outerproduct_index_scalable(%arg0: vector<2xindex>, %arg1: vector<[3]xindex>) -> vector<2x[3]xindex> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xindex>, vector<[3]xindex>
   return %2 : vector<2x[3]xindex>
@@ -738,6 +773,8 @@ func.func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: v
 // CHECK:       %[[T19:.*]] = builtin.unrealized_conversion_cast %[[T18]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
 // CHECK:       return %[[T19]] : vector<2x3xf32>
 
+// -----
+
 func.func @outerproduct_add_scalable(%arg0: vector<2xf32>, %arg1: vector<[3]xf32>, %arg2: vector<2x[3]xf32>) -> vector<2x[3]xf32> {
   %2 = vector.outerproduct %arg0, %arg1, %arg2 : vector<2xf32>, vector<[3]xf32>
   return %2 : vector<2x[3]xf32>
@@ -782,6 +819,8 @@ func.func @masked_float_add_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v
 // CHECK:           %[[VAL_8:.*]] = llvm.intr.fmuladd(%[[VAL_0]], %{{.*}}, %[[VAL_2]])  : (vector<2xf32>, vector<2xf32>, vector<2xf32>) -> vector<2xf32>
 // CHECK:           %[[VAL_9:.*]] = arith.select %[[VAL_3]], %[[VAL_8]], %[[VAL_2]] : vector<2xi1>, vector<2xf32>
 
+// -----
+
 func.func @masked_float_add_outerprod_scalable(%arg0: vector<[2]xf32>, %arg1: f32, %arg2: vector<[2]xf32>, %m: vector<[2]xi1>) -> vector<[2]xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<add>} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32>
   return %0 : vector<[2]xf32>
@@ -805,6 +844,8 @@ func.func @masked_float_mul_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v
 // CHECK:           %[[VAL_9:.*]] = arith.mulf %[[VAL_8]], %[[VAL_2]] : vector<2xf32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xf32>
 
+// -----
+
 func.func @masked_float_mul_outerprod_scalable(%arg0: vector<[2]xf32>, %arg1: f32, %arg2: vector<[2]xf32>, %m: vector<[2]xi1>) -> vector<[2]xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<mul>} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32>
   return %0 : vector<[2]xf32>
@@ -829,6 +870,8 @@ func.func @masked_float_max_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v
 // CHECK:           %[[VAL_9:.*]] = arith.maxnumf %[[VAL_8]], %[[VAL_2]] : vector<2xf32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xf32>
 
+// -----
+
 func.func @masked_float_max_outerprod_scalable(%arg0: vector<[2]xf32>, %arg1: f32, %arg2: vector<[2]xf32>, %m: vector<[2]xi1>) -> vector<[2]xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<maxnumf>} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32>
   return %0 : vector<[2]xf32>
@@ -853,6 +896,8 @@ func.func @masked_float_min_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v
 // CHECK:           %[[VAL_9:.*]] = arith.minnumf %[[VAL_8]], %[[VAL_2]] : vector<2xf32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xf32>
 
+// -----
+
 func.func @masked_float_min_outerprod_scalable(%arg0: vector<[2]xf32>, %arg1: f32, %arg2: vector<[2]xf32>, %m: vector<[2]xi1>) -> vector<[2]xf32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<minnumf>} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32>
   return %0 : vector<[2]xf32>
@@ -877,6 +922,8 @@ func.func @masked_int_add_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vec
 // CHECK:           %[[VAL_9:.*]] = arith.addi %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_add_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<add>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -901,6 +948,8 @@ func.func @masked_int_mul_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vec
 // CHECK:           %[[VAL_9:.*]] = arith.muli %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_mul_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<mul>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -925,6 +974,8 @@ func.func @masked_int_max_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vec
 // CHECK:           %[[VAL_9:.*]] = arith.maxsi %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_max_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<maxsi>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -949,6 +1000,8 @@ func.func @masked_int_min_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vec
 // CHECK:           %[[VAL_9:.*]] = arith.minui %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_min_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<minui>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -973,6 +1026,8 @@ func.func @masked_int_and_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vec
 // CHECK:           %[[VAL_9:.*]] = arith.andi %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_and_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<and>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -997,6 +1052,8 @@ func.func @masked_int_or_outerprod(%arg0: vector<2xi32>, %arg1: i32, %arg2: vect
 // CHECK:           %[[VAL_9:.*]] = arith.ori %[[VAL_8]], %[[VAL_2]] : vector<2xi32>
 // CHECK:           %[[VAL_10:.*]] = arith.select %[[VAL_3]], %[[VAL_9]], %[[VAL_2]] : vector<2xi1>, vector<2xi32>
 
+// -----
+
 func.func @masked_int_or_outerprod_scalable(%arg0: vector<[2]xi32>, %arg1: i32, %arg2: vector<[2]xi32>, %m: vector<[2]xi1>) -> vector<[2]xi32> {
   %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind<or>} : vector<[2]xi32>, i32 } : vector<[2]xi1> -> vector<[2]xi32>
   return %0 : vector<[2]xi32>
@@ -1126,6 +1183,8 @@ func.func @extractelement_from_vec_1d_f32_idx_as_i32(%arg0: vector<16xf32>) -> f
 //       CHECK:   %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : i32] : vector<16xf32>
 //       CHECK:   return %[[x]] : f32
 
+// -----
+
 func.func @extractelement_from_vec_1d_f32_idx_as_i32_scalable(%arg0: vector<[16]xf32>) -> f32 {
   %0 = arith.constant 15 : i32
   %1 = vector.extractelement %arg0[%0 : i32]: vector<[16]xf32>
@@ -1150,6 +1209,8 @@ func.func @extractelement_from_vec_1d_f32_idx_as_index(%arg0: vector<16xf32>) ->
 //       CHECK:   %[[x:.*]] = llvm.extractelement %[[A]][%[[i]] : i64] : vector<16xf32>
 //       CHECK:   return %[[x]] : f32
 
+// -----
+
 func.func @extractelement_from_vec_1d_f32_idx_as_index_scalable(%arg0: vector<[16]xf32>) -> f32 {
   %0 = arith.constant 15 : index
   %1 = vector.extractelement %arg0[%0 : index]: vector<[16]xf32>
@@ -1177,6 +1238,8 @@ func.func @extract_scalar_from_vec_1d_f32(%arg0: vector<16xf32>) -> f32 {
 //       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32>
 //       CHECK:   return {{.*}} : f32
 
+// -----
+
 func.func @extract_scalar_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> f32 {
   %0 = vector.extract %arg0[15]: f32 from vector<[16]xf32>
   return %0 : f32
@@ -1199,6 +1262,8 @@ func.func @extract_vec_1e_from_vec_1d_f32(%arg0: vector<16xf32>) -> vector<1xf32
 //       CHECK:   %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : f32 to vector<1xf32>
 //       CHECK:   return %[[T2]] : vector<1xf32>
 
+// -----
+
 func.func @extract_vec_1e_from_vec_1d_f32_scalable(%arg0: vector<[16]xf32>) -> vector<1xf32> {
   %0 = vector.extract %arg0[15]: vector<1xf32> from vector<[16]xf32>
   return %0 : vector<1xf32>
@@ -1224,6 +1289,8 @@ func.func @extract_scalar_from_vec_1d_index(%arg0: vector<16xindex>) -> index {
 //       CHECK:   %[[T3:.*]] = builtin.unrealized_conversion_cast %[[T2]] : i64 to index
 //       CHECK:   return %[[T3]] : index
 
+// -----
+
 func.func @extract_scalar_from_vec_1d_index_scalable(%arg0: vector<[16]xindex>) -> index {
   %0 = vector.extract %arg0[15]: index from vector<[16]xindex>
   return %0 : index
@@ -1246,6 +1313,8 @@ func.func @extract_vec_2d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<3
 //       CHECK:   llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vector<16xf32>>>
 //       CHECK:   return {{.*}} : vector<3x16xf32>
 
+// -----
+
 func.func @extract_vec_2d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<3x[16]xf32> {
   %0 = vector.extract %arg0[0]: vector<3x[16]xf32> from vector<4x3x[16]xf32>
   return %0 : vector<3x[16]xf32>
@@ -1264,6 +1333,8 @@ func.func @extract_vec_1d_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> vector<1
 //       CHECK:   llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vector<16xf32>>>
 //       CHECK:   return {{.*}} : vector<16xf32>
 
+// -----
+
 func.func @extract_vec_1d_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> vector<[16]xf32> {
   %0 = vector.extract %arg0[0, 0]: vector<[16]xf32> from vector<4x3x[16]xf32>
   return %0 : vector<[16]xf32>
@@ -1284,6 +1355,8 @@ func.func @extract_scalar_from_vec_3d_f32(%arg0: vector<4x3x16xf32>) -> f32 {
 //       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : vector<16xf32>
 //       CHECK:   return {{.*}} : f32
 
+// -----
+
 func.func @extract_scalar_from_vec_3d_f32_scalable(%arg0: vector<4x3x[16]xf32>) -> f32 {
   %0 = vector.extract %arg0[0, 0, 0]: f32 from vector<4x3x[16]xf32>
   return %0 : f32
@@ -1305,6 +1378,8 @@ func.func @extract_scalar_from_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %ar
 //       CHECK:   %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64
 //       CHECK:   llvm.extractelement %[[VEC]][%[[UC]] : i64] : vector<16xf32>
 
+// -----
+
 func.func @extract_scalar_from_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: index) -> f32 {
   %0 = vector.extract %arg0[%arg1]: f32 from vector<[16]xf32>
   return %0 : f32
@@ -1326,6 +1401,8 @@ func.func @extract_scalar_from_vec_2d_f32_dynamic_idx(%arg0: vector<1x16xf32>, %
 // CHECK-LABEL: @extract_scalar_from_vec_2d_f32_dynamic_idx(
 //       CHECK:   vector.extract
 
+// -----
+
 func.func @extract_scalar_from_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: index) -> f32 {
   %0 = vector.extract %arg0[0, %arg1]: f32 from vector<1x[16]xf32>
   return %0 : f32
@@ -1367,6 +1444,8 @@ func.func @insertelement_into_vec_1d_f32_idx_as_i32(%arg0: f32, %arg1: vector<4x
 //       CHECK:   %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : i32] : vector<4xf32>
 //       CHECK:   return %[[x]] : vector<4xf32>
 
+// -----
+
 func.func @insertelement_into_vec_1d_f32_idx_as_i32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> {
   %0 = arith.constant 3 : i32
   %1 = vector.insertelement %arg0, %arg1[%0 : i32] : vector<[4]xf32>
@@ -1394,6 +1473,8 @@ func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index(%arg0: f32, %arg1
 //       CHECK:   %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[i]] : i64] : vector<4xf32>
 //       CHECK:   return %[[x]] : vector<4xf32>
 
+// -----
+
 func.func @insertelement_into_vec_1d_f32_scalable_idx_as_index_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> {
   %0 = arith.constant 3 : index
   %1 = vector.insertelement %arg0, %arg1[%0 : index] : vector<[4]xf32>
@@ -1422,6 +1503,8 @@ func.func @insert_scalar_into_vec_1d_f32(%arg0: f32, %arg1: vector<4xf32>) -> ve
 //       CHECK:   llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32>
 //       CHECK:   return {{.*}} : vector<4xf32>
 
+// -----
+
 func.func @insert_scalar_into_vec_1d_f32_scalable(%arg0: f32, %arg1: vector<[4]xf32>) -> vector<[4]xf32> {
   %0 = vector.insert %arg0, %arg1[3] : f32 into vector<[4]xf32>
   return %0 : vector<[4]xf32>
@@ -1447,6 +1530,7 @@ func.func @insert_scalar_into_vec_1d_index(%arg0: index, %arg1: vector<4xindex>)
 //       CHECK:   %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : vector<4xi64> to vector<4xindex>
 //       CHECK:   return %[[T5]] : vector<4xindex>
 
+// -----
 
 func.func @insert_scalar_into_vec_1d_index_scalable(%arg0: index, %arg1: vector<[4]xindex>) -> vector<[4]xindex> {
   %0 = vector.insert %arg0, %arg1[3] : index into vector<[4]xindex>
@@ -1472,6 +1556,8 @@ func.func @insert_vec_2d_into_vec_3d_f32(%arg0: vector<8x16xf32>, %arg1: vector<
 //       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vector<16xf32>>>
 //       CHECK:   return {{.*}} : vector<4x8x16xf32>
 
+// -----
+
 func.func @insert_vec_2d_into_vec_3d_f32_scalable(%arg0: vector<8x[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> {
   %0 = vector.insert %arg0, %arg1[3] : vector<8x[16]xf32> into vector<4x8x[16]xf32>
   return %0 : vector<4x8x[16]xf32>
@@ -1490,6 +1576,8 @@ func.func @insert_vec_1d_into_vec_3d_f32(%arg0: vector<16xf32>, %arg1: vector<4x
 //       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>>
 //       CHECK:   return {{.*}} : vector<4x8x16xf32>
 
+// -----
+
 func.func @insert_vec_1d_into_vec_3d_f32_scalable(%arg0: vector<[16]xf32>, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7] : vector<[16]xf32> into vector<4x8x[16]xf32>
   return %0 : vector<4x8x[16]xf32>
@@ -1511,6 +1599,8 @@ func.func @insert_scalar_into_vec_3d_f32(%arg0: f32, %arg1: vector<4x8x16xf32>)
 //       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vector<16xf32>>>
 //       CHECK:   return {{.*}} : vector<4x8x16xf32>
 
+// -----
+
 func.func @insert_scalar_into_vec_3d_f32_scalable(%arg0: f32, %arg1: vector<4x8x[16]xf32>) -> vector<4x8x[16]xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x[16]xf32>
   return %0 : vector<4x8x[16]xf32>
@@ -1535,6 +1625,8 @@ func.func @insert_scalar_into_vec_1d_f32_dynamic_idx(%arg0: vector<16xf32>, %arg
 //       CHECK:   %[[UC:.+]] = builtin.unrealized_conversion_cast %[[INDEX]] : index to i64
 //       CHECK:   llvm.insertelement %[[SRC]], %[[DST]][%[[UC]] : i64] : vector<16xf32>
 
+// -----
+
 func.func @insert_scalar_into_vec_1d_f32_dynamic_idx_scalable(%arg0: vector<[16]xf32>, %arg1: f32, %arg2: index)
                                       -> vector<[16]xf32> {
   %0 = vector.insert %arg1, %arg0[%arg2]: f32 into vector<[16]xf32>
@@ -1559,6 +1651,8 @@ func.func @insert_scalar_into_vec_2d_f32_dynamic_idx(%arg0: vector<1x16xf32>, %a
 // CHECK-LABEL: @insert_scalar_into_vec_2d_f32_dynamic_idx(
 //       CHECK:   vector.insert
 
+// -----
+
 func.func @insert_scalar_into_vec_2d_f32_dynamic_idx_scalable(%arg0: vector<1x[16]xf32>, %arg1: f32, %idx: index)
                                         -> vector<1x[16]xf32> {
   %0 = vector.insert %arg1, %arg0[0, %idx]: f32 into vector<1x[16]xf32>
@@ -1863,6 +1957,8 @@ func.func @extract_strided_slice_f32_1d_from_2d(%arg0: vector<4x8xf32>) -> vecto
 //       CHECK:    %[[T5:.*]] = builtin.unrealized_conversion_cast %[[T4]] : !llvm.array<2 x vector<8xf32>> to vector<2x8xf32>
 //       CHECK:    return %[[T5]]
 
+// -----
+
 func.func @extract_strided_slice_f32_1d_from_2d_scalable(%arg0: vector<4x[8]xf32>) -> vector<2x[8]xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x[8]xf32> to vector<2x[8]xf32>
   return %0 : vector<2x[8]xf32>
@@ -1898,6 +1994,8 @@ func.func @extract_strided_slice_f32_2d_from_2d(%arg0: vector<4x8xf32>) -> vecto
 //       CHECK:    %[[VAL_12:.*]] = builtin.unrealized_conversion_cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32>
 //       CHECK:    return %[[VAL_12]] : vector<2x2xf32>
 
+// -----
+
 // NOTE: For scalable vectors, we can only extract "full" scalable dimensions
 // (e.g. [8] from [8], but not [4] from [8]).
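
// A hedged, illustrative sketch of the NOTE above (hypothetical function name;
// shapes and attribute values assumed, not taken from the tests in this file):
// the scalable dimension must be extracted whole, so only static dimensions
// may shrink.
func.func @extract_full_scalable_dim_sketch(%src: vector<4x[8]xf32>) -> vector<2x[8]xf32> {
  // OK: [8] stays [8]; only the static dim is sliced from 4 down to 2.
  %ok = vector.extract_strided_slice %src
          {offsets = [2, 0], sizes = [2, 8], strides = [1, 1]}
          : vector<4x[8]xf32> to vector<2x[8]xf32>
  // Not supported: a slice such as sizes = [2, 4] (i.e. [8] -> [4]) would
  // shrink the scalable dimension.
  return %ok : vector<2x[8]xf32>
}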
 
@@ -1932,6 +2030,8 @@ func.func @insert_strided_slice_f32_2d_into_3d(%b: vector<4x4xf32>, %c: vector<4
 //       CHECK:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x array<4 x vector<4xf32>>>
 //       CHECK:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x array<4 x vector<4xf32>>>
 
+// -----
+
 func.func @insert_strided_slice_f32_2d_into_3d_scalable(%b: vector<4x[4]xf32>, %c: vector<4x4x[4]xf32>) -> vector<4x4x[4]xf32> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x[4]xf32> into vector<4x4x[4]xf32>
   return %0 : vector<4x4x[4]xf32>
@@ -1950,6 +2050,8 @@ func.func @insert_strided_index_slice_index_2d_into_3d(%b: vector<4x4xindex>, %c
 //       CHECK:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x array<4 x vector<4xi64>>>
 //       CHECK:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x array<4 x vector<4xi64>>>
 
+// -----
+
 func.func @insert_strided_index_slice_index_2d_into_3d_scalable(%b: vector<4x[4]xindex>, %c: vector<4x4x[4]xindex>) -> vector<4x4x[4]xindex> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x[4]xindex> into vector<4x4x[4]xindex>
   return %0 : vector<4x4x[4]xindex>
@@ -1983,6 +2085,8 @@ func.func @insert_strided_slice_f32_2d_into_2d(%a: vector<2x2xf32>, %b: vector<4
 //       CHECK:    %[[R4_3:.*]] = llvm.shufflevector %[[R4_2]], %[[V4_3]] [4, 5, 0, 1] : vector<4xf32>
 //       CHECK:    llvm.insertvalue %[[R4_3]], {{.*}}[3] : !llvm.array<4 x vector<4xf32>>
 
+// -----
+
 // NOTE: For scalable dimensions, the corresponding "base" size must match
 // (i.e. we can only insert "full" scalable dimensions, e.g. [2] into [2], but
 // not [2] from [4]).
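
// A hedged, illustrative sketch of the NOTE above (hypothetical function name;
// shapes assumed): the source's scalable dimension must match the
// destination's, so only the static dimensions may differ.
func.func @insert_full_scalable_dim_sketch(%src: vector<2x[2]xf32>, %dst: vector<4x[2]xf32>) -> vector<4x[2]xf32> {
  // OK: the scalable [2] dim matches; only the static dim differs (2 into 4).
  %ok = vector.insert_strided_slice %src, %dst {offsets = [2, 0], strides = [1, 1]}
          : vector<2x[2]xf32> into vector<4x[2]xf32>
  // Not supported: inserting a [2] slice into a [4] scalable dimension.
  return %ok : vector<4x[2]xf32>
}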
@@ -2022,6 +2126,8 @@ func.func @insert_strided_slice_f32_2d_into_3d(%arg0: vector<2x4xf32>, %arg1: ve
 //       CHECK:    %[[R8_3:.*]] = llvm.shufflevector %[[R8_2]], %[[V4_0_1]] [8, 9, 0, 1, 2, 3, 14, 15] : vector<8xf32>
 //       CHECK:    llvm.insertvalue %[[R8_3]], {{.*}}[1] : !llvm.array<4 x vector<8xf32>>
 
+// -----
+
 // NOTE: For scalable dimensions, the corresponding "base" size must match
 // (i.e. we can only insert "full" scalable dimensions, e.g. [4] into [4], but
 // not [4] from [8]).
@@ -2054,68 +2160,70 @@ func.func @insert_strided_slice_f32_2d_into_3d_scalable(%arg0: vector<2x[4]xf32>
 // vector.fma
 //===----------------------------------------------------------------------===//
 
-func.func @fma(%a: vector<8xf32>, %b: vector<2x4xf32>, %c: vector<1x1x1xf32>, %d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
+func.func @fma(%vec_1d: vector<8xf32>, %vec_2d: vector<2x4xf32>, %vec_3d: vector<1x1x1xf32>, %vec_0d: vector<f32>) -> (vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>) {
   // CHECK-LABEL: @fma
-  //  CHECK-SAME: %[[A:.*]]: vector<8xf32>
-  //  CHECK-SAME: %[[B:.*]]: vector<2x4xf32>
-  //  CHECK-SAME: %[[C:.*]]: vector<1x1x1xf32>
-  //       CHECK: %[[BL:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
+  //  CHECK-SAME: %[[VEC_1D:.*]]: vector<8xf32>
+  //  CHECK-SAME: %[[VEC_2D:.*]]: vector<2x4xf32>
+  //  CHECK-SAME: %[[VEC_3D:.*]]: vector<1x1x1xf32>
+  //       CHECK: %[[VEC_2D_CAST:.*]] = builtin.unrealized_conversion_cast %[[VEC_2D]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
   //       CHECK: llvm.intr.fmuladd
   //  CHECK-SAME:   (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
-  %0 = vector.fma %a, %a, %a : vector<8xf32>
+  %0 = vector.fma %vec_1d, %vec_1d, %vec_1d : vector<8xf32>
 
-  //       CHECK: %[[b00:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[b01:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[b02:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[B0:.*]] = llvm.intr.fmuladd(%[[b00]], %[[b01]], %[[b02]]) :
+  //       CHECK: %[[VEC_2D_00:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_01:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_02:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_ADD_1:.*]] = llvm.intr.fmuladd(%[[VEC_2D_00]], %[[VEC_2D_01]], %[[VEC_2D_02]]) :
   //  CHECK-SAME: (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
-  //       CHECK: llvm.insertvalue %[[B0]], {{.*}}[0] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[b10:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[b11:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[b12:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
-  //       CHECK: %[[B1:.*]] = llvm.intr.fmuladd(%[[b10]], %[[b11]], %[[b12]]) :
+  //       CHECK: llvm.insertvalue %[[VEC_2D_ADD_1]], {{.*}}[0] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_10:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_11:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_12:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<4xf32>>
+  //       CHECK: %[[VEC_2D_ADD_2:.*]] = llvm.intr.fmuladd(%[[VEC_2D_10]], %[[VEC_2D_11]], %[[VEC_2D_12]]) :
   //  CHECK-SAME: (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
-  //       CHECK: llvm.insertvalue %[[B1]], {{.*}}[1] : !llvm.array<2 x vector<4xf32>>
-  %1 = vector.fma %b, %b, %b : vector<2x4xf32>
+  //       CHECK: llvm.insertvalue %[[VEC_2D_ADD_2]], {{.*}}[1] : !llvm.array<2 x vector<4xf32>>
+  %1 = vector.fma %vec_2d, %vec_2d, %vec_2d : vector<2x4xf32>
 
   //       CHECK: %[[C0:.*]] = llvm.intr.fmuladd
   //  CHECK-SAME:   (vector<1xf32>, vector<1xf32>, vector<1xf32>) -> vector<1xf32>
-  %2 = vector.fma %c, %c, %c : vector<1x1x1xf32>
+  %2 = vector.fma %vec_3d, %vec_3d, %vec_3d : vector<1x1x1xf32>
 
   //       CHECK: %[[D0:.*]] = llvm.intr.fmuladd
   //  CHECK-SAME:   (vector<1xf32>, vector<1xf32>, vector<1xf32>) -> vector<1xf32>
-  %3 = vector.fma %d, %d, %d : vector<f32>
+  %3 = vector.fma %vec_0d, %vec_0d, %vec_0d : vector<f32>
 
   return %0, %1, %2, %3: vector<8xf32>, vector<2x4xf32>, vector<1x1x1xf32>, vector<f32>
 }
 
-func.func @fma_scalable(%a: vector<[8]xf32>, %b: vector<2x[4]xf32>, %c: vector<1x1x[1]xf32>, %d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
+// -----
+
+func.func @fma_scalable(%vec_1d: vector<[8]xf32>, %vec_2d: vector<2x[4]xf32>, %vec_3d: vector<1x1x[1]xf32>, %vec_0d: vector<f32>) -> (vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>) {
   // CHECK-LABEL: @fma_scalable
-  //  CHECK-SAME: %[[A:.*]]: vector<[8]xf32>
-  //  CHECK-SAME: %[[B:.*]]: vector<2x[4]xf32>
-  //  CHECK-SAME: %[[C:.*]]: vector<1x1x[1]xf32>
-  //       CHECK: %[[BL:.*]] = builtin.unrealized_conversion_cast %[[B]] : vector<2x[4]xf32> to !llvm.array<2 x vector<[4]xf32>>
+  //  CHECK-SAME: %[[VEC_1D:.*]]: vector<[8]xf32>
+  //  CHECK-SAME: %[[VEC_2D:.*]]: vector<2x[4]xf32>
+  //  CHECK-SAME: %[[VEC_3D:.*]]: vector<1x1x[1]xf32>
+  //       CHECK: %[[VEC_2D_CAST:.*]] = builtin.unrealized_conversion_cast %[[VEC_2D]] : vector<2x[4]xf32> to !llvm.array<2 x vector<[4]xf32>>
   //       CHECK: llvm.intr.fmuladd
   //  CHECK-SAME:   (vector<[8]xf32>, vector<[8]xf32>, vector<[8]xf32>) -> vector<[8]xf32>
-  %0 = vector.fma %a, %a, %a : vector<[8]xf32>
+  %0 = vector.fma %vec_1d, %vec_1d, %vec_1d : vector<[8]xf32>
 
-  //       CHECK: %[[b00:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[b01:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[b02:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[B0:.*]] = llvm.intr.fmuladd(%[[b00]], %[[b01]], %[[b02]]) :
+  //       CHECK: %[[VEC_2D_00:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_01:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_02:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][0] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_ADD_1:.*]] = llvm.intr.fmuladd(%[[VEC_2D_00]], %[[VEC_2D_01]], %[[VEC_2D_02]]) :
   //  CHECK-SAME: (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  //       CHECK: llvm.insertvalue %[[B0]], {{.*}}[0] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[b10:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[b11:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[b12:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<[4]xf32>>
-  //       CHECK: %[[B1:.*]] = llvm.intr.fmuladd(%[[b10]], %[[b11]], %[[b12]]) :
+  //       CHECK: llvm.insertvalue %[[VEC_2D_ADD_1]], {{.*}}[0] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_10:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_11:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_12:.*]] = llvm.extractvalue %[[VEC_2D_CAST]][1] : !llvm.array<2 x vector<[4]xf32>>
+  //       CHECK: %[[VEC_2D_ADD_2:.*]] = llvm.intr.fmuladd(%[[VEC_2D_10]], %[[VEC_2D_11]], %[[VEC_2D_12]]) :
   //  CHECK-SAME: (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>) -> vector<[4]xf32>
-  //       CHECK: llvm.insertvalue %[[B1]], {{.*}}[1] : !llvm.array<2 x vector<[4]xf32>>
-  %1 = vector.fma %b, %b, %b : vector<2x[4]xf32>
+  //       CHECK: llvm.insertvalue %[[VEC_2D_ADD_2]], {{.*}}[1] : !llvm.array<2 x vector<[4]xf32>>
+  %1 = vector.fma %vec_2d, %vec_2d, %vec_2d : vector<2x[4]xf32>
 
   //       CHECK: %[[C0:.*]] = llvm.intr.fmuladd
   //  CHECK-SAME:   (vector<[1]xf32>, vector<[1]xf32>, vector<[1]xf32>) -> vector<[1]xf32>
-  %2 = vector.fma %c, %c, %c : vector<1x1x[1]xf32>
+  %2 = vector.fma %vec_3d, %vec_3d, %vec_3d : vector<1x1x[1]xf32>
 
   return %0, %1, %2: vector<[8]xf32>, vector<2x[4]xf32>, vector<1x1x[1]xf32>
 }
@@ -2151,6 +2259,8 @@ func.func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
 // CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f16, vector<16xf16>) -> f16
 //      CHECK: return %[[V]] : f16
 
+// -----
+
 func.func @reduce_f16_scalable(%arg0: vector<[16]xf16>) -> f16 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xf16> into f16
   return %0 : f16
@@ -2175,6 +2285,8 @@ func.func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
 // CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f32, vector<16xf32>) -> f32
 //      CHECK: return %[[V]] : f32
 
+// -----
+
 func.func @reduce_f32_scalable(%arg0: vector<[16]xf32>) -> f32 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xf32> into f32
   return %0 : f32
@@ -2199,6 +2311,8 @@ func.func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
 // CHECK-SAME: <{fastmathFlags = #llvm.fastmath<none>}> : (f64, vector<16xf64>) -> f64
 //      CHECK: return %[[V]] : f64
 
+// -----
+
 func.func @reduce_f64_scalable(%arg0: vector<[16]xf64>) -> f64 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xf64> into f64
   return %0 : f64
@@ -2221,6 +2335,8 @@ func.func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
 //      CHECK: return %[[V]] : i8
 
+// -----
+
 func.func @reduce_i8_scalable(%arg0: vector<[16]xi8>) -> i8 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xi8> into i8
   return %0 : i8
@@ -2241,6 +2357,8 @@ func.func @reduce_i32(%arg0: vector<16xi32>) -> i32 {
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
 //      CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2262,6 +2380,8 @@ func.func @reduce_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.add %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <add>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2283,6 +2403,8 @@ func.func @reduce_mul_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.mul"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_mul_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <mul>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2304,6 +2426,8 @@ func.func @reduce_mul_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.mul %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_mul_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <mul>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2326,6 +2450,8 @@ func.func @reduce_fmaximum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 {
 //      CHECK: %[[R:.*]] = llvm.intr.maximum(%[[V]], %[[B]]) : (f32, f32) -> f32
 //      CHECK: return %[[R]] : f32
 
+// -----
+
 func.func @reduce_fmaximum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 {
   %0 = vector.reduction <maximumf>, %arg0, %arg1 : vector<[16]xf32> into f32
   return %0 : f32
@@ -2348,6 +2474,8 @@ func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 {
 //      CHECK: %[[R:.*]] = llvm.intr.minimum(%[[V]], %[[B]]) : (f32, f32) -> f32
 //      CHECK: return %[[R]] : f32
 
+// -----
+
 func.func @reduce_fminimum_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 {
   %0 = vector.reduction <minimumf>, %arg0, %arg1 : vector<[16]xf32> into f32
   return %0 : f32
@@ -2370,6 +2498,8 @@ func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 {
 //      CHECK: %[[R:.*]] = llvm.intr.maxnum(%[[V]], %[[B]]) : (f32, f32) -> f32
 //      CHECK: return %[[R]] : f32
 
+// -----
+
 func.func @reduce_fmax_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 {
   %0 = vector.reduction <maxnumf>, %arg0, %arg1 : vector<[16]xf32> into f32
   return %0 : f32
@@ -2392,6 +2522,8 @@ func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 {
 //      CHECK: %[[R:.*]] = llvm.intr.minnum(%[[V]], %[[B]]) : (f32, f32) -> f32
 //      CHECK: return %[[R]] : f32
 
+// -----
+
 func.func @reduce_fmin_f32_scalable(%arg0: vector<[16]xf32>, %arg1: f32) -> f32 {
   %0 = vector.reduction <minnumf>, %arg0, %arg1 : vector<[16]xf32> into f32
   return %0 : f32
@@ -2413,6 +2545,8 @@ func.func @reduce_minui_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umin"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_minui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <minui>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2435,6 +2569,8 @@ func.func @reduce_minui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_minui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <minui>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2457,6 +2593,8 @@ func.func @reduce_maxui_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.umax"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_maxui_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <maxui>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2479,6 +2617,8 @@ func.func @reduce_maxui_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_maxui_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <maxui>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2501,6 +2641,8 @@ func.func @reduce_minsi_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smin"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_minsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <minsi>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2523,6 +2665,8 @@ func.func @reduce_minsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_minsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <minsi>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2545,6 +2689,8 @@ func.func @reduce_maxsi_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.smax"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_maxsi_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <maxsi>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2567,6 +2713,8 @@ func.func @reduce_maxsi_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.select %[[S]], %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_maxsi_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <maxsi>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2589,6 +2737,8 @@ func.func @reduce_and_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.and"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_and_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <and>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2610,6 +2760,8 @@ func.func @reduce_and_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.and %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_and_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <and>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2631,6 +2783,8 @@ func.func @reduce_or_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.or"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_or_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <or>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2652,6 +2806,8 @@ func.func @reduce_or_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.or %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_or_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <or>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2673,6 +2829,8 @@ func.func @reduce_xor_i32(%arg0: vector<16xi32>) -> i32 {
 //       CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.xor"(%[[A]])
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_xor_i32_scalable(%arg0: vector<[16]xi32>) -> i32 {
   %0 = vector.reduction <xor>, %arg0 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2694,6 +2852,8 @@ func.func @reduce_xor_acc_i32(%arg0: vector<16xi32>, %arg1 : i32) -> i32 {
 //       CHECK: %[[V:.*]] = llvm.xor %[[ACC]], %[[R]]
 //       CHECK: return %[[V]] : i32
 
+// -----
+
 func.func @reduce_xor_acc_i32_scalable(%arg0: vector<[16]xi32>, %arg1 : i32) -> i32 {
   %0 = vector.reduction <xor>, %arg0, %arg1 : vector<[16]xi32> into i32
   return %0 : i32
@@ -2715,6 +2875,8 @@ func.func @reduce_i64(%arg0: vector<16xi64>) -> i64 {
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.add"(%[[A]])
 //      CHECK: return %[[V]] : i64
 
+// -----
+
 func.func @reduce_i64_scalable(%arg0: vector<[16]xi64>) -> i64 {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xi64> into i64
   return %0 : i64
@@ -2737,6 +2899,8 @@ func.func @reduce_index(%arg0: vector<16xindex>) -> index {
 //      CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index
 //      CHECK: return %[[T2]] : index
 
+// -----
+
 func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index {
   %0 = vector.reduction <add>, %arg0 : vector<[16]xindex> into index
   return %0 : index
@@ -2748,13 +2912,13 @@ func.func @reduce_index_scalable(%arg0: vector<[16]xindex>) -> index {
 //      CHECK: %[[T2:.*]] = builtin.unrealized_conversion_cast %[[T1]] : i64 to index
 //      CHECK: return %[[T2]] : index
 
-//                          4x16                16x3               4x3
 // -----
 
 //===----------------------------------------------------------------------===//
 // vector.matrix_multiply
 //===----------------------------------------------------------------------===//
 
+//                          4x16                16x3               4x3
 func.func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
   %C = vector.matrix_multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
@@ -2882,51 +3046,51 @@ func.func @negative_constant_mask_2d_leading_scalable() -> vector<[4]x4xi1> {
 // vector.create_mask
 //===----------------------------------------------------------------------===//
 
-func.func @create_mask_0d(%a : index) -> vector<i1> {
-  %v = vector.create_mask %a : vector<i1>
+func.func @create_mask_0d(%num_elems : index) -> vector<i1> {
+  %v = vector.create_mask %num_elems : vector<i1>
   return %v: vector<i1>
 }
 
 // CHECK-LABEL: func @create_mask_0d
-// CHECK-SAME: %[[arg:.*]]: index
-// CHECK:  %[[indices:.*]] = arith.constant dense<0> : vector<i32>
-// CHECK:  %[[arg_i32:.*]] = arith.index_cast %[[arg]] : index to i32
-// CHECK:  %[[bounds:.*]] = llvm.insertelement %[[arg_i32]]
-// CHECK:  %[[boundsCast:.*]] = builtin.unrealized_conversion_cast %[[bounds]] : vector<1xi32> to vector<i32>
-// CHECK:  %[[result:.*]] = arith.cmpi slt, %[[indices]], %[[boundsCast]] : vector<i32>
-// CHECK:  return %[[result]] : vector<i1>
+// CHECK-SAME: %[[NUM_ELEMS:.*]]: index
+// CHECK:  %[[INDICES:.*]] = arith.constant dense<0> : vector<i32>
+// CHECK:  %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32
+// CHECK:  %[[BOUNDS:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]]
+// CHECK:  %[[BOUNDS_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOUNDS]] : vector<1xi32> to vector<i32>
+// CHECK:  %[[RESULT:.*]] = arith.cmpi slt, %[[INDICES]], %[[BOUNDS_CAST]] : vector<i32>
+// CHECK:  return %[[RESULT]] : vector<i1>
 
 // -----
 
-func.func @create_mask_1d(%a : index) -> vector<4xi1> {
-  %v = vector.create_mask %a : vector<4xi1>
+func.func @create_mask_1d(%num_elems : index) -> vector<4xi1> {
+  %v = vector.create_mask %num_elems : vector<4xi1>
   return %v: vector<4xi1>
 }
 
 // CHECK-LABEL: func @create_mask_1d
-// CHECK-SAME: %[[arg:.*]]: index
-// CHECK:  %[[indices:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32>
-// CHECK:  %[[arg_i32:.*]] = arith.index_cast %[[arg]] : index to i32
-// CHECK:  %[[boundsInsert:.*]] = llvm.insertelement %[[arg_i32]]
-// CHECK:  %[[bounds:.*]] = llvm.shufflevector %[[boundsInsert]]
-// CHECK:  %[[result:.*]] = arith.cmpi slt, %[[indices]], %[[bounds]] : vector<4xi32>
-// CHECK:  return %[[result]] : vector<4xi1>
+// CHECK-SAME: %[[NUM_ELEMS:.*]]: index
+// CHECK:  %[[INDICES:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32>
+// CHECK:  %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32
+// CHECK:  %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]]
+// CHECK:  %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]]
+// CHECK:  %[[RESULT:.*]] = arith.cmpi slt, %[[INDICES]], %[[BOUNDS]] : vector<4xi32>
+// CHECK:  return %[[RESULT]] : vector<4xi1>
 
 // -----
 
-func.func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
-  %v = vector.create_mask %a : vector<[4]xi1>
+func.func @create_mask_1d_scalable(%num_elems : index) -> vector<[4]xi1> {
+  %v = vector.create_mask %num_elems : vector<[4]xi1>
   return %v: vector<[4]xi1>
 }
 
 // CHECK-LABEL: func @create_mask_1d_scalable
-// CHECK-SAME: %[[arg:.*]]: index
-// CHECK:  %[[indices:.*]] = llvm.intr.stepvector : vector<[4]xi32>
-// CHECK:  %[[arg_i32:.*]] = arith.index_cast %[[arg]] : index to i32
-// CHECK:  %[[boundsInsert:.*]] = llvm.insertelement %[[arg_i32]], {{.*}} : vector<[4]xi32>
-// CHECK:  %[[bounds:.*]] = llvm.shufflevector %[[boundsInsert]], {{.*}} : vector<[4]xi32>
-// CHECK:  %[[result:.*]] = arith.cmpi slt, %[[indices]], %[[bounds]] : vector<[4]xi32>
-// CHECK: return %[[result]] : vector<[4]xi1>
+// CHECK-SAME: %[[NUM_ELEMS:.*]]: index
+// CHECK:  %[[INDICES:.*]] = llvm.intr.stepvector : vector<[4]xi32>
+// CHECK:  %[[NUM_ELEMS_i32:.*]] = arith.index_cast %[[NUM_ELEMS]] : index to i32
+// CHECK:  %[[BOUNDS_INSERT:.*]] = llvm.insertelement %[[NUM_ELEMS_i32]], {{.*}} : vector<[4]xi32>
+// CHECK:  %[[BOUNDS:.*]] = llvm.shufflevector %[[BOUNDS_INSERT]], {{.*}} : vector<[4]xi32>
+// CHECK:  %[[RESULT:.*]] = arith.cmpi slt, %[[INDICES]], %[[BOUNDS]] : vector<[4]xi32>
+// CHECK: return %[[RESULT]] : vector<[4]xi1>
 
 // -----
 
@@ -2980,16 +3144,18 @@ func.func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
 
 // -----
 
-//===----------------------------------------------------------------------===//
-// vector.load
-//===----------------------------------------------------------------------===//
-
 func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
   %0 = vector.flat_transpose %arg0 { rows = 4: i32, columns = 4: i32 }
      : vector<16xf32> -> vector<16xf32>
   return %0 : vector<16xf32>
 }
 
+// -----
+
+//===----------------------------------------------------------------------===//
+// vector.load
+//===----------------------------------------------------------------------===//
+
 // CHECK-LABEL: func @flat_transpose
 // CHECK-SAME:  %[[A:.*]]: vector<16xf32>
 // CHECK:       %[[T:.*]] = llvm.intr.matrix.transpose %[[A]]
@@ -2997,6 +3163,8 @@ func.func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
 // CHECK-SAME:      vector<16xf32> into vector<16xf32>
 // CHECK:       return %[[T]] : vector<16xf32>
 
+// -----
+
 func.func @load(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
@@ -3802,6 +3970,8 @@ func.func @deinterleave_1d(%a: vector<4xi32>) -> (vector<2xi32>, vector<2xi32>)
   return %0, %1 : vector<2xi32>, vector<2xi32>
 }
 
+// -----
+
 // CHECK-LABEL: @deinterleave_1d_scalable
 // CHECK-SAME:  %[[SRC:.*]]: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>)
 func.func @deinterleave_1d_scalable(%a: vector<[4]xi32>) -> (vector<[2]xi32>, vector<[2]xi32>) {
@@ -3812,6 +3982,8 @@ func.func @deinterleave_1d_scalable(%a: vector<[4]xi32>) -> (vector<[2]xi32>, ve
     return %0, %1 : vector<[2]xi32>, vector<[2]xi32>
 }
 
+// -----
+
 // CHECK-LABEL: @deinterleave_2d
 // CHECK-SAME: %[[SRC:.*]]: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>)
 func.func @deinterleave_2d(%a: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4xf32>) {
@@ -3821,6 +3993,8 @@ func.func @deinterleave_2d(%a: vector<2x8xf32>) -> (vector<2x4xf32>, vector<2x4x
   return %0, %1 : vector<2x4xf32>, vector<2x4xf32>
 }
 
+// -----
+
 func.func @deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[4]xf32>, vector<2x[4]xf32>) {
     // CHECK: llvm.intr.vector.deinterleave2
     // CHECK-NOT: vector.deinterleave %{{.*}} : vector<2x[8]xf32>
@@ -3835,31 +4009,31 @@ func.func @deinterleave_2d_scalable(%a: vector<2x[8]xf32>) -> (vector<2x[4]xf32>
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: func.func @from_elements_1d(
-//  CHECK-SAME:     %[[a:.*]]: f32, %[[b:.*]]: f32)
-//       CHECK:   %[[undef:.*]] = llvm.mlir.undef : vector<3xf32>
-//       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : i64) : i64
-//       CHECK:   %[[insert0:.*]] = llvm.insertelement %[[a]], %[[undef]][%[[c0]] : i64] : vector<3xf32>
-//       CHECK:   %[[c1:.*]] = llvm.mlir.constant(1 : i64) : i64
-//       CHECK:   %[[insert1:.*]] = llvm.insertelement %[[b]], %[[insert0]][%[[c1]] : i64] : vector<3xf32>
-//       CHECK:   %[[c2:.*]] = llvm.mlir.constant(2 : i64) : i64
-//       CHECK:   %[[insert2:.*]] = llvm.insertelement %[[a]], %[[insert1]][%[[c2]] : i64] : vector<3xf32>
-//       CHECK:   return %[[insert2]]
-func.func @from_elements_1d(%a: f32, %b: f32) -> vector<3xf32> {
-  %0 = vector.from_elements %a, %b, %a : vector<3xf32>
+//  CHECK-SAME:     %[[ARG_0:.*]]: f32, %[[ARG_1:.*]]: f32)
+//       CHECK:   %[[UNDEF:.*]] = llvm.mlir.undef : vector<3xf32>
+//       CHECK:   %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+//       CHECK:   %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[UNDEF]][%[[C0]] : i64] : vector<3xf32>
+//       CHECK:   %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+//       CHECK:   %[[INSERT1:.*]] = llvm.insertelement %[[ARG_1]], %[[INSERT0]][%[[C1]] : i64] : vector<3xf32>
+//       CHECK:   %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
+//       CHECK:   %[[INSERT2:.*]] = llvm.insertelement %[[ARG_0]], %[[INSERT1]][%[[C2]] : i64] : vector<3xf32>
+//       CHECK:   return %[[INSERT2]]
+func.func @from_elements_1d(%arg0: f32, %arg1: f32) -> vector<3xf32> {
+  %0 = vector.from_elements %arg0, %arg1, %arg0 : vector<3xf32>
   return %0 : vector<3xf32>
 }
 
 // -----
 
 // CHECK-LABEL: func.func @from_elements_0d(
-//  CHECK-SAME:     %[[a:.*]]: f32)
-//       CHECK:   %[[undef:.*]] = llvm.mlir.undef : vector<1xf32>
-//       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : i64) : i64
-//       CHECK:   %[[insert0:.*]] = llvm.insertelement %[[a]], %[[undef]][%[[c0]] : i64] : vector<1xf32>
-//       CHECK:   %[[cast:.*]] = builtin.unrealized_conversion_cast %[[insert0]] : vector<1xf32> to vector<f32>
-//       CHECK:   return %[[cast]]
-func.func @from_elements_0d(%a: f32) -> vector<f32> {
-  %0 = vector.from_elements %a : vector<f32>
+//  CHECK-SAME:     %[[ARG_0:.*]]: f32)
+//       CHECK:   %[[UNDEF:.*]] = llvm.mlir.undef : vector<1xf32>
+//       CHECK:   %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
+//       CHECK:   %[[INSERT0:.*]] = llvm.insertelement %[[ARG_0]], %[[UNDEF]][%[[C0]] : i64] : vector<1xf32>
+//       CHECK:   %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[INSERT0]] : vector<1xf32> to vector<f32>
+//       CHECK:   return %[[CAST]]
+func.func @from_elements_0d(%arg0: f32) -> vector<f32> {
+  %0 = vector.from_elements %arg0 : vector<f32>
   return %0 : vector<f32>
 }
 
@@ -3869,6 +4043,16 @@ func.func @from_elements_0d(%a: f32) -> vector<f32> {
 // vector.step
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @step
+// CHECK: %[[CST:.+]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
+// CHECK: return %[[CST]] : vector<4xindex>
+func.func @step() -> vector<4xindex> {
+  %0 = vector.step : vector<4xindex>
+  return %0 : vector<4xindex>
+}
+
+// -----
+
 // CHECK-LABEL: @step_scalable
 // CHECK: %[[STEPVECTOR:.*]] = llvm.intr.stepvector : vector<[4]xi64>
 // CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex>
@@ -3877,13 +4061,3 @@ func.func @step_scalable() -> vector<[4]xindex> {
   %0 = vector.step : vector<[4]xindex>
   return %0 : vector<[4]xindex>
 }
-
-// -----
-
-// CHECK-LABEL: @step
-// CHECK: %[[CST:.+]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex>
-// CHECK: return %[[CST]] : vector<4xindex>
-func.func @step() -> vector<4xindex> {
-  %0 = vector.step : vector<4xindex>
-  return %0 : vector<4xindex>
-}


