[Mlir-commits] [mlir] f003b28 - [MLIR][SPIRVToLLVM] Indentation and style fix in tests

George Mitenkov llvmlistbot at llvm.org
Tue Aug 4 04:32:21 PDT 2020


Author: George Mitenkov
Date: 2020-08-04T14:30:49+03:00
New Revision: f003b28363f9e09f3f68a5e08ebcca2635a4cdca

URL: https://github.com/llvm/llvm-project/commit/f003b28363f9e09f3f68a5e08ebcca2635a4cdca
DIFF: https://github.com/llvm/llvm-project/commit/f003b28363f9e09f3f68a5e08ebcca2635a4cdca.diff

LOG: [MLIR][SPIRVToLLVM] Indentation and style fix in tests

This is a first patch that sweeps over tests to fix
indentation (tabs to spaces). It also adds label checks and
removes redundant matching of `%{{.*}} = `.

The following tests have been fixed:
- arithmetic-ops-to-llvm
- bitwise-ops-to-llvm
- cast-ops-to-llvm
- comparison-ops-to-llvm
- logical-ops-to-llvm (renamed to match the rest)

Reviewed By: ftynse

Differential Revision: https://reviews.llvm.org/D85181

Added: 
    mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir

Modified: 
    mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir

Removed: 
    mlir/test/Conversion/SPIRVToLLVM/logical-to-llvm.mlir


################################################################################
diff --git a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
index 2f5d18a6acf8..8ae0d9840713 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
@@ -4,206 +4,232 @@
 // spv.IAdd
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @iadd_scalar
 func @iadd_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.add %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.IAdd %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.IAdd %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @iadd_vector
 func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) {
-	// CHECK: %{{.*}} = llvm.add %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
-	%0 = spv.IAdd %arg0, %arg1 : vector<4xi64>
-	return
+  // CHECK: llvm.add %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
+  %0 = spv.IAdd %arg0, %arg1 : vector<4xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ISub
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @isub_scalar
 func @isub_scalar(%arg0: i8, %arg1: i8) {
-	// CHECK: %{{.*}} = llvm.sub %{{.*}}, %{{.*}} : !llvm.i8
-	%0 = spv.ISub %arg0, %arg1 : i8
-	return
+  // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm.i8
+  %0 = spv.ISub %arg0, %arg1 : i8
+  return
 }
 
+// CHECK-LABEL: @isub_vector
 func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) {
-	// CHECK: %{{.*}} = llvm.sub %{{.*}}, %{{.*}} : !llvm<"<2 x i16>">
-	%0 = spv.ISub %arg0, %arg1 : vector<2xi16>
-	return
+  // CHECK: llvm.sub %{{.*}}, %{{.*}} : !llvm<"<2 x i16>">
+  %0 = spv.ISub %arg0, %arg1 : vector<2xi16>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.IMul
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @imul_scalar
 func @imul_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.mul %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.IMul %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.IMul %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @imul_vector
 func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) {
-	// CHECK: %{{.*}} = llvm.mul %{{.*}}, %{{.*}} : !llvm<"<3 x i32>">
-	%0 = spv.IMul %arg0, %arg1 : vector<3xi32>
-	return
+  // CHECK: llvm.mul %{{.*}}, %{{.*}} : !llvm<"<3 x i32>">
+  %0 = spv.IMul %arg0, %arg1 : vector<3xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FAdd
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fadd_scalar
 func @fadd_scalar(%arg0: f16, %arg1: f16) {
-	// CHECK: %{{.*}} = llvm.fadd %{{.*}}, %{{.*}} : !llvm.half
-	%0 = spv.FAdd %arg0, %arg1 : f16
-	return
+  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.half
+  %0 = spv.FAdd %arg0, %arg1 : f16
+  return
 }
 
+// CHECK-LABEL: @fadd_vector
 func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
-	// CHECK: %{{.*}} = llvm.fadd %{{.*}}, %{{.*}} : !llvm<"<4 x float>">
-	%0 = spv.FAdd %arg0, %arg1 : vector<4xf32>
-	return
+  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm<"<4 x float>">
+  %0 = spv.FAdd %arg0, %arg1 : vector<4xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FSub
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fsub_scalar
 func @fsub_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fsub %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FSub %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FSub %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @fsub_vector
 func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) {
-	// CHECK: %{{.*}} = llvm.fsub %{{.*}}, %{{.*}} : !llvm<"<2 x float>">
-	%0 = spv.FSub %arg0, %arg1 : vector<2xf32>
-	return
+  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm<"<2 x float>">
+  %0 = spv.FSub %arg0, %arg1 : vector<2xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FDiv
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fdiv_scalar
 func @fdiv_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fdiv %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FDiv %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FDiv %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @fdiv_vector
 func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) {
-	// CHECK: %{{.*}} = llvm.fdiv %{{.*}}, %{{.*}} : !llvm<"<3 x double>">
-	%0 = spv.FDiv %arg0, %arg1 : vector<3xf64>
-	return
+  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm<"<3 x double>">
+  %0 = spv.FDiv %arg0, %arg1 : vector<3xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FMul
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fmul_scalar
 func @fmul_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fmul %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FMul %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FMul %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @fmul_vector
 func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) {
-	// CHECK: %{{.*}} = llvm.fmul %{{.*}}, %{{.*}} : !llvm<"<2 x float>">
-	%0 = spv.FMul %arg0, %arg1 : vector<2xf32>
-	return
+  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm<"<2 x float>">
+  %0 = spv.FMul %arg0, %arg1 : vector<2xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FRem
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @frem_scalar
 func @frem_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.frem %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FRem %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FRem %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @frem_vector
 func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) {
-	// CHECK: %{{.*}} = llvm.frem %{{.*}}, %{{.*}} : !llvm<"<3 x double>">
-	%0 = spv.FRem %arg0, %arg1 : vector<3xf64>
-	return
+  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm<"<3 x double>">
+  %0 = spv.FRem %arg0, %arg1 : vector<3xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FNegate
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fneg_scalar
 func @fneg_scalar(%arg: f64) {
-	// CHECK: %{{.*}} = llvm.fneg %{{.*}} : !llvm.double
-	%0 = spv.FNegate %arg : f64
-	return
+  // CHECK: llvm.fneg %{{.*}} : !llvm.double
+  %0 = spv.FNegate %arg : f64
+  return
 }
 
+// CHECK-LABEL: @fneg_vector
 func @fneg_vector(%arg: vector<2xf32>) {
-	// CHECK: %{{.*}} = llvm.fneg %{{.*}} : !llvm<"<2 x float>">
-	%0 = spv.FNegate %arg : vector<2xf32>
-	return
+  // CHECK: llvm.fneg %{{.*}} : !llvm<"<2 x float>">
+  %0 = spv.FNegate %arg : vector<2xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.UDiv
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @udiv_scalar
 func @udiv_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.udiv %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.UDiv %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.UDiv %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @udiv_vector
 func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) {
-	// CHECK: %{{.*}} = llvm.udiv %{{.*}}, %{{.*}} : !llvm<"<3 x i64>">
-	%0 = spv.UDiv %arg0, %arg1 : vector<3xi64>
-	return
+  // CHECK: llvm.udiv %{{.*}}, %{{.*}} : !llvm<"<3 x i64>">
+  %0 = spv.UDiv %arg0, %arg1 : vector<3xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.UMod
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @umod_scalar
 func @umod_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.urem %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.UMod %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.UMod %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @umod_vector
 func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) {
-	// CHECK: %{{.*}} = llvm.urem %{{.*}}, %{{.*}} : !llvm<"<3 x i64>">
-	%0 = spv.UMod %arg0, %arg1 : vector<3xi64>
-	return
+  // CHECK: llvm.urem %{{.*}}, %{{.*}} : !llvm<"<3 x i64>">
+  %0 = spv.UMod %arg0, %arg1 : vector<3xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SDiv
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @sdiv_scalar
 func @sdiv_scalar(%arg0: i16, %arg1: i16) {
-	// CHECK: %{{.*}} = llvm.sdiv %{{.*}}, %{{.*}} : !llvm.i16
-	%0 = spv.SDiv %arg0, %arg1 : i16
-	return
+  // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm.i16
+  %0 = spv.SDiv %arg0, %arg1 : i16
+  return
 }
 
+// CHECK-LABEL: @sdiv_vector
 func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.sdiv %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.SDiv %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.SDiv %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SRem
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @srem_scalar
 func @srem_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.srem %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.SRem %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.SRem %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @srem_vector
 func @srem_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) {
-	// CHECK: %{{.*}} = llvm.srem %{{.*}}, %{{.*}} : !llvm<"<4 x i32>">
-	%0 = spv.SRem %arg0, %arg1 : vector<4xi32>
-	return
+  // CHECK: llvm.srem %{{.*}}, %{{.*}} : !llvm<"<4 x i32>">
+  %0 = spv.SRem %arg0, %arg1 : vector<4xi32>
+  return
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
index 31ffc6dbf7dc..aed82d218db2 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
@@ -4,309 +4,321 @@
 // spv.BitCount
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitcount_scalar
 func @bitcount_scalar(%arg0: i16) {
-	// CHECK: %{{.*}} = "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i16) -> !llvm.i16
-	%0 = spv.BitCount %arg0: i16
-	return
+  // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm.i16) -> !llvm.i16
+  %0 = spv.BitCount %arg0: i16
+  return
 }
 
+// CHECK-LABEL: @bitcount_vector
 func @bitcount_vector(%arg0: vector<3xi32>) {
-	// CHECK: %{{.*}} = "llvm.intr.ctpop"(%{{.*}}) : (!llvm<"<3 x i32>">) -> !llvm<"<3 x i32>">
-	%0 = spv.BitCount %arg0: vector<3xi32>
-	return
+  // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (!llvm<"<3 x i32>">) -> !llvm<"<3 x i32>">
+  %0 = spv.BitCount %arg0: vector<3xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitReverse
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitreverse_scalar
 func @bitreverse_scalar(%arg0: i64) {
-	// CHECK: %{{.*}} = "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i64) -> !llvm.i64
-	%0 = spv.BitReverse %arg0: i64
-	return
+  // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm.i64) -> !llvm.i64
+  %0 = spv.BitReverse %arg0: i64
+  return
 }
 
+// CHECK-LABEL: @bitreverse_vector
 func @bitreverse_vector(%arg0: vector<4xi32>) {
-	// CHECK: %{{.*}} = "llvm.intr.bitreverse"(%{{.*}}) : (!llvm<"<4 x i32>">) -> !llvm<"<4 x i32>">
-	%0 = spv.BitReverse %arg0: vector<4xi32>
-	return
+  // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (!llvm<"<4 x i32>">) -> !llvm<"<4 x i32>">
+  %0 = spv.BitReverse %arg0: vector<4xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitFieldInsert
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: func @bitfield_insert_scalar_same_bit_width
+// CHECK-LABEL: @bitfield_insert_scalar_same_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[INSERT:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
 func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offset: i32, %count: i32) {
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
-    // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
-    // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : !llvm.i32
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i32
-    // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i32
-    // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : !llvm.i32
-    // CHECK: %{{.*}} = llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i32
-    %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
-    return
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
+  // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET]] : !llvm.i32
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i32
+  // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : !llvm.i32
+  // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i32
+  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
+  return
 }
 
-// CHECK-LABEL: func @bitfield_insert_scalar_smaller_bit_width
+// CHECK-LABEL: @bitfield_insert_scalar_smaller_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[INSERT:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
 func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %offset: i8, %count: i8) {
-    // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i64
-    // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i64
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : !llvm.i64
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i64
-    // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i64
-    // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : !llvm.i64
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i64
-    // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i64
-    // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : !llvm.i64
-    // CHECK: %{{.*}} = llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i64
-    %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8
-    return
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i64
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i64
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : !llvm.i64
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i64
+  // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i64
+  // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[EXT_OFFSET]] : !llvm.i64
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i64
+  // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i64
+  // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : !llvm.i64
+  // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i64
+  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8
+  return
 }
 
-// CHECK-LABEL: func @bitfield_insert_scalar_greater_bit_width
+// CHECK-LABEL: @bitfield_insert_scalar_greater_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i16, %[[INSERT:.*]]: !llvm.i16, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i64
 func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %offset: i32, %count: i64) {
-    // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i32 to !llvm.i16
-    // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i16
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : !llvm.i16
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : !llvm.i16
-    // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i16
-    // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : !llvm.i16
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i16
-    // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i16
-    // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : !llvm.i16
-    // CHECK: %{{.*}} = llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i16
-    %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64
-    return
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i32 to !llvm.i16
+  // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i16
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : !llvm.i16
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[TRUNC_COUNT]] : !llvm.i16
+  // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i16
+  // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[TRUNC_OFFSET]] : !llvm.i16
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm.i16
+  // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm.i16
+  // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : !llvm.i16
+  // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm.i16
+  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64
+  return
 }
 
-// CHECK-LABEL: func @bitfield_insert_vector
+// CHECK-LABEL: @bitfield_insert_vector
 // CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[INSERT:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
 func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %offset: i32, %count: i32) {
-    // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
-    // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
-    // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm<"<2 x i32>">
-    // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %{{.*}} = llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm<"<2 x i32>">
-    %0 = spv.BitFieldInsert %base, %insert, %offset, %count : vector<2xi32>, i32, i32
-    return
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T2:.*]] = llvm.shl %[[T1]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T2]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
+  // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : vector<2xi32>, i32, i32
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitFieldSExtract
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: func @bitfield_sextract_scalar_same_bit_width
+// CHECK-LABEL: @bitfield_sextract_scalar_same_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i64, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
 func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) {
-    // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : !llvm.i64
-    // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : !llvm.i64
-    // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i64
-    // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i64
-    // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : !llvm.i64
-    // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i64
-    %0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
-    return
+  // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : !llvm.i64
+  // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : !llvm.i64
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i64
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i64
+  // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : !llvm.i64
+  // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i64
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
+  return
 }
 
-// CHECK-LABEL: func @bitfield_sextract_scalar_smaller_bit_width
+// CHECK-LABEL: @bitfield_sextract_scalar_smaller_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i8, %[[COUNT:.*]]: !llvm.i8
 func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) {
-    // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i32
-    // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
-    // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
-    // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm.i32
-    // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
-    // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
-    // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm.i32
-    // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
-    %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
-    return
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.add %[[EXT_COUNT]], %[[EXT_OFFSET]] : !llvm.i32
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
+  // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : !llvm.i32
+  // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
+  return
 }
 
-// CHECK-LABEL: func @bitfield_sextract_scalar_greater_bit_width
+// CHECK-LABEL: @bitfield_sextract_scalar_greater_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i64, %[[COUNT:.*]]: !llvm.i64
 func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) {
-    // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i64 to !llvm.i32
-    // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i32
-    // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
-    // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm.i32
-    // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
-    // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
-    // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm.i32
-    // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
-    %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
-    return
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i64 to !llvm.i32
+  // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : !llvm.i64 to !llvm.i32
+  // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.add %[[TRUNC_COUNT]], %[[TRUNC_OFFSET]] : !llvm.i32
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm.i32
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm.i32
+  // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : !llvm.i32
+  // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm.i32
+  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
+  return
 }
 
-// CHECK-LABEL: func @bitfield_sextract_vector
+// CHECK-LABEL: @bitfield_sextract_vector
 // CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
 func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) {
-    // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm<"<2 x i32>">
-    // CHECK: %[[T0:.*]] = llvm.add %[[COUNT_V2]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm<"<2 x i32>">
-    // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm<"<2 x i32>">
-    // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET_V2]], %[[T1]] : !llvm<"<2 x i32>">
-    // CHECK: %{{.*}} = llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm<"<2 x i32>">
-    %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i32, i32
-    return
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(dense<32> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.add %[[COUNT_V2]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET_V2]], %[[T1]] : !llvm<"<2 x i32>">
+  // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitFieldUExtract
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: func @bitfield_uextract_scalar_same_bit_width
+// CHECK-LABEL: @bitfield_uextract_scalar_same_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
 func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) {
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
-    // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : !llvm.i32
-    // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
-    %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
-    return
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i32
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : !llvm.i32
+  // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
+  return
 }
 
-// CHECK-LABEL: func @bitfield_uextract_scalar_smaller_bit_width
+// CHECK-LABEL: @bitfield_uextract_scalar_smaller_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i32, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
 func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) {
-    // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i16 to !llvm.i32
-    // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i32
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
-    // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm.i32
-    // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
-    %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
-    return
+  // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : !llvm.i16 to !llvm.i32
+  // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : !llvm.i8 to !llvm.i32
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[EXT_COUNT]] : !llvm.i32
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i32
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : !llvm.i32
+  // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i32
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
+  return
 }
 
-// CHECK-LABEL: func @bitfield_uextract_scalar_greater_bit_width
+// CHECK-LABEL: @bitfield_uextract_scalar_greater_bit_width
 // CHECK-SAME: %[[BASE:.*]]: !llvm.i8, %[[OFFSET:.*]]: !llvm.i16, %[[COUNT:.*]]: !llvm.i8
 func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) {
-    // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i16 to !llvm.i8
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : !llvm.i8
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i8
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i8
-    // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : !llvm.i8
-    // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i8
-    %0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
-    return
+  // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : !llvm.i16 to !llvm.i8
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : !llvm.i8
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : !llvm.i8
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm.i8
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : !llvm.i8
+  // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm.i8
+  %0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
+  return
 }
 
-// CHECK-LABEL: func @bitfield_uextract_vector
+// CHECK-LABEL: @bitfield_uextract_vector
 // CHECK-SAME: %[[BASE:.*]]: !llvm<"<2 x i32>">, %[[OFFSET:.*]]: !llvm.i32, %[[COUNT:.*]]: !llvm.i32
 func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) {
-    // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
-    // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
-    // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
-    // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
-    // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
-    // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
-    // CHECK: %{{.*}} = llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm<"<2 x i32>">
-    %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i32, i32
-    return
+  // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[OFFSET_V2:.*]] = llvm.insertelement  %[[OFFSET]], %[[OFFSET_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[COUNT_V0:.*]] = llvm.mlir.undef : !llvm<"<2 x i32>">
+  // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V1:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V0]][%[[ZERO]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : !llvm.i32
+  // CHECK: %[[COUNT_V2:.*]] = llvm.insertelement %[[COUNT]], %[[COUNT_V1]][%[[ONE]] : !llvm.i32] : !llvm<"<2 x i32>">
+  // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi32>) : !llvm<"<2 x i32>">
+  // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT_V2]] : !llvm<"<2 x i32>">
+  // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : !llvm<"<2 x i32>">
+  // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET_V2]] : !llvm<"<2 x i32>">
+  // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : !llvm<"<2 x i32>">
+  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitwiseAnd
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitwise_and_scalar
 func @bitwise_and_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.and %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.BitwiseAnd %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.BitwiseAnd %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @bitwise_and_vector
 func @bitwise_and_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) {
-	// CHECK: %{{.*}} = llvm.and %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
-	%0 = spv.BitwiseAnd %arg0, %arg1 : vector<4xi64>
-	return
+  // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
+  %0 = spv.BitwiseAnd %arg0, %arg1 : vector<4xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitwiseOr
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitwise_or_scalar
 func @bitwise_or_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.or %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.BitwiseOr %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.BitwiseOr %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @bitwise_or_vector
 func @bitwise_or_vector(%arg0: vector<3xi8>, %arg1: vector<3xi8>) {
-	// CHECK: %{{.*}} = llvm.or %{{.*}}, %{{.*}} : !llvm<"<3 x i8>">
-	%0 = spv.BitwiseOr %arg0, %arg1 : vector<3xi8>
-	return
+  // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm<"<3 x i8>">
+  %0 = spv.BitwiseOr %arg0, %arg1 : vector<3xi8>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.BitwiseXor
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitwise_xor_scalar
 func @bitwise_xor_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.xor %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.BitwiseXor %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.xor %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.BitwiseXor %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @bitwise_xor_vector
 func @bitwise_xor_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) {
-	// CHECK: %{{.*}} = llvm.xor %{{.*}}, %{{.*}} : !llvm<"<2 x i16>">
-	%0 = spv.BitwiseXor %arg0, %arg1 : vector<2xi16>
-	return
+  // CHECK: llvm.xor %{{.*}}, %{{.*}} : !llvm<"<2 x i16>">
+  %0 = spv.BitwiseXor %arg0, %arg1 : vector<2xi16>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.Not
 //===----------------------------------------------------------------------===//
 
-func @not__scalar(%arg0: i32) {
+// CHECK-LABEL: @not_scalar
+func @not_scalar(%arg0: i32) {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : !llvm.i32
-  // CHECK: %{{.*}} = llvm.xor %{{.*}}, %[[CONST]] : !llvm.i32
-	%0 = spv.Not %arg0 : i32
+  // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i32
+  %0 = spv.Not %arg0 : i32
   return
 }
 
+// CHECK-LABEL: @not_vector
 func @not_vector(%arg0: vector<2xi16>) {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi16>) : !llvm<"<2 x i16>">
-  // CHECK: %{{.*}} = llvm.xor %{{.*}}, %[[CONST]] : !llvm<"<2 x i16>">
-	%0 = spv.Not %arg0 : vector<2xi16>
+  // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm<"<2 x i16>">
+  %0 = spv.Not %arg0 : vector<2xi16>
   return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
index 29f43fadf933..a0434aad3bea 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
@@ -4,168 +4,188 @@
 // spv.Bitcast
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @bitcast_float_to_integer_scalar
 func @bitcast_float_to_integer_scalar(%arg0 : f32) {
-	// CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.float to !llvm.i32
-	%0 = spv.Bitcast %arg0: f32 to i32
-	return
+  // CHECK: llvm.bitcast {{.*}} : !llvm.float to !llvm.i32
+  %0 = spv.Bitcast %arg0: f32 to i32
+  return
 }
 
+// CHECK-LABEL: @bitcast_float_to_integer_vector
 func @bitcast_float_to_integer_vector(%arg0 : vector<3xf32>) {
-	// CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm<"<3 x float>"> to !llvm<"<3 x i32>">
-	%0 = spv.Bitcast %arg0: vector<3xf32> to vector<3xi32>
-	return
+  // CHECK: llvm.bitcast {{.*}} : !llvm<"<3 x float>"> to !llvm<"<3 x i32>">
+  %0 = spv.Bitcast %arg0: vector<3xf32> to vector<3xi32>
+  return
 }
 
+// CHECK-LABEL: @bitcast_vector_to_scalar
 func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) {
-	// CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm<"<2 x float>"> to !llvm.i64
-	%0 = spv.Bitcast %arg0: vector<2xf32> to i64
-	return
+  // CHECK: llvm.bitcast {{.*}} : !llvm<"<2 x float>"> to !llvm.i64
+  %0 = spv.Bitcast %arg0: vector<2xf32> to i64
+  return
 }
 
+// CHECK-LABEL: @bitcast_scalar_to_vector
 func @bitcast_scalar_to_vector(%arg0 : f64) {
-	// CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.double to !llvm<"<2 x i32>">
-	%0 = spv.Bitcast %arg0: f64 to vector<2xi32>
-	return
+  // CHECK: llvm.bitcast {{.*}} : !llvm.double to !llvm<"<2 x i32>">
+  %0 = spv.Bitcast %arg0: f64 to vector<2xi32>
+  return
 }
 
+// CHECK-LABEL: @bitcast_vector_to_vector
 func @bitcast_vector_to_vector(%arg0 : vector<4xf32>) {
-	// CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm<"<4 x float>"> to !llvm<"<2 x i64>">
-	%0 = spv.Bitcast %arg0: vector<4xf32> to vector<2xi64>
-	return
+  // CHECK: llvm.bitcast {{.*}} : !llvm<"<4 x float>"> to !llvm<"<2 x i64>">
+  %0 = spv.Bitcast %arg0: vector<4xf32> to vector<2xi64>
+  return
 }
 
+// CHECK-LABEL: @bitcast_pointer
 func @bitcast_pointer(%arg0: !spv.ptr<f32, Function>) {
-	// CHECK: %{{.*}} = llvm.bitcast %{{.*}} : !llvm<"float*"> to !llvm<"i32*">
-	%0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
-	return
+  // CHECK: llvm.bitcast %{{.*}} : !llvm<"float*"> to !llvm<"i32*">
+  %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ConvertFToS
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @convert_float_to_signed_scalar
 func @convert_float_to_signed_scalar(%arg0: f32) {
-	// CHECK: %{{.*}} = llvm.fptosi %{{.*}} : !llvm.float to !llvm.i32
-    %0 = spv.ConvertFToS %arg0: f32 to i32
-	return
+  // CHECK: llvm.fptosi %{{.*}} : !llvm.float to !llvm.i32
+  %0 = spv.ConvertFToS %arg0: f32 to i32
+  return
 }
 
+// CHECK-LABEL: @convert_float_to_signed_vector
 func @convert_float_to_signed_vector(%arg0: vector<2xf32>) {
-	// CHECK: %{{.*}} = llvm.fptosi %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x i32>">
-    %0 = spv.ConvertFToS %arg0: vector<2xf32> to vector<2xi32>
-	return
+  // CHECK: llvm.fptosi %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x i32>">
+  %0 = spv.ConvertFToS %arg0: vector<2xf32> to vector<2xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ConvertFToU
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @convert_float_to_unsigned_scalar
 func @convert_float_to_unsigned_scalar(%arg0: f32) {
-	// CHECK: %{{.*}} = llvm.fptoui %{{.*}} : !llvm.float to !llvm.i32
-    %0 = spv.ConvertFToU %arg0: f32 to i32
-	return
+  // CHECK: llvm.fptoui %{{.*}} : !llvm.float to !llvm.i32
+  %0 = spv.ConvertFToU %arg0: f32 to i32
+  return
 }
 
+// CHECK-LABEL: @convert_float_to_unsigned_vector
 func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) {
-	// CHECK: %{{.*}} = llvm.fptoui %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x i32>">
-    %0 = spv.ConvertFToU %arg0: vector<2xf32> to vector<2xi32>
-	return
+  // CHECK: llvm.fptoui %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x i32>">
+  %0 = spv.ConvertFToU %arg0: vector<2xf32> to vector<2xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ConvertSToF
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @convert_signed_to_float_scalar
 func @convert_signed_to_float_scalar(%arg0: i32) {
-	// CHECK: %{{.*}} = llvm.sitofp %{{.*}} : !llvm.i32 to !llvm.float
-    %0 = spv.ConvertSToF %arg0: i32 to f32
-	return
+  // CHECK: llvm.sitofp %{{.*}} : !llvm.i32 to !llvm.float
+  %0 = spv.ConvertSToF %arg0: i32 to f32
+  return
 }
 
+// CHECK-LABEL: @convert_signed_to_float_vector
 func @convert_signed_to_float_vector(%arg0: vector<3xi32>) {
-	// CHECK: %{{.*}} = llvm.sitofp %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x float>">
-    %0 = spv.ConvertSToF %arg0: vector<3xi32> to vector<3xf32>
-	return
+  // CHECK: llvm.sitofp %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x float>">
+  %0 = spv.ConvertSToF %arg0: vector<3xi32> to vector<3xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ConvertUToF
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @convert_unsigned_to_float_scalar
 func @convert_unsigned_to_float_scalar(%arg0: i32) {
-	// CHECK: %{{.*}} = llvm.uitofp %{{.*}} : !llvm.i32 to !llvm.float
-    %0 = spv.ConvertUToF %arg0: i32 to f32
-	return
+  // CHECK: llvm.uitofp %{{.*}} : !llvm.i32 to !llvm.float
+  %0 = spv.ConvertUToF %arg0: i32 to f32
+  return
 }
 
+// CHECK-LABEL: @convert_unsigned_to_float_vector
 func @convert_unsigned_to_float_vector(%arg0: vector<3xi32>) {
-	// CHECK: %{{.*}} = llvm.uitofp %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x float>">
-    %0 = spv.ConvertUToF %arg0: vector<3xi32> to vector<3xf32>
-	return
+  // CHECK: llvm.uitofp %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x float>">
+  %0 = spv.ConvertUToF %arg0: vector<3xi32> to vector<3xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FConvert
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @fconvert_scalar
 func @fconvert_scalar(%arg0: f32, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fpext %{{.*}} : !llvm.float to !llvm.double
-    %0 = spv.FConvert %arg0: f32 to f64
+  // CHECK: llvm.fpext %{{.*}} : !llvm.float to !llvm.double
+  %0 = spv.FConvert %arg0: f32 to f64
 
-    // CHECK: %{{.*}} = llvm.fptrunc %{{.*}} : !llvm.double to !llvm.float
-    %1 = spv.FConvert %arg1: f64 to f32
-	return
+  // CHECK: llvm.fptrunc %{{.*}} : !llvm.double to !llvm.float
+  %1 = spv.FConvert %arg1: f64 to f32
+  return
 }
 
+// CHECK-LABEL: @fconvert_vector
 func @fconvert_vector(%arg0: vector<2xf32>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fpext %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x double>">
-    %0 = spv.FConvert %arg0: vector<2xf32> to vector<2xf64>
+  // CHECK: llvm.fpext %{{.*}} : !llvm<"<2 x float>"> to !llvm<"<2 x double>">
+  %0 = spv.FConvert %arg0: vector<2xf32> to vector<2xf64>
 
-    // CHECK: %{{.*}} = llvm.fptrunc %{{.*}} : !llvm<"<2 x double>"> to !llvm<"<2 x float>">
-    %1 = spv.FConvert %arg1: vector<2xf64> to vector<2xf32>
-	return
+  // CHECK: llvm.fptrunc %{{.*}} : !llvm<"<2 x double>"> to !llvm<"<2 x float>">
+  %1 = spv.FConvert %arg1: vector<2xf64> to vector<2xf32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SConvert
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @sconvert_scalar
 func @sconvert_scalar(%arg0: i32, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
-    %0 = spv.SConvert %arg0: i32 to i64
+  // CHECK: llvm.sext %{{.*}} : !llvm.i32 to !llvm.i64
+  %0 = spv.SConvert %arg0: i32 to i64
 
-    // CHECK: %{{.*}} = llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
-    %1 = spv.SConvert %arg1: i64 to i32
-	return
+  // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
+  %1 = spv.SConvert %arg1: i64 to i32
+  return
 }
 
+// CHECK-LABEL: @sconvert_vector
 func @sconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) {
-	// CHECK: %{{.*}} = llvm.sext %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x i64>">
-    %0 = spv.SConvert %arg0: vector<3xi32> to vector<3xi64>
+  // CHECK: llvm.sext %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x i64>">
+  %0 = spv.SConvert %arg0: vector<3xi32> to vector<3xi64>
 
-    // CHECK: %{{.*}} = llvm.trunc %{{.*}} : !llvm<"<3 x i64>"> to !llvm<"<3 x i32>">
-    %1 = spv.SConvert %arg1: vector<3xi64> to vector<3xi32>
-	return
+  // CHECK: llvm.trunc %{{.*}} : !llvm<"<3 x i64>"> to !llvm<"<3 x i32>">
+  %1 = spv.SConvert %arg1: vector<3xi64> to vector<3xi32>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.UConvert
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @uconvert_scalar
 func @uconvert_scalar(%arg0: i32, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.zext %{{.*}} : !llvm.i32 to !llvm.i64
-    %0 = spv.UConvert %arg0: i32 to i64
+  // CHECK: llvm.zext %{{.*}} : !llvm.i32 to !llvm.i64
+  %0 = spv.UConvert %arg0: i32 to i64
 
-    // CHECK: %{{.*}} = llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
-    %1 = spv.UConvert %arg1: i64 to i32
-	return
+  // CHECK: llvm.trunc %{{.*}} : !llvm.i64 to !llvm.i32
+  %1 = spv.UConvert %arg1: i64 to i32
+  return
 }
 
+// CHECK-LABEL: @uconvert_vector
 func @uconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) {
-	// CHECK: %{{.*}} = llvm.zext %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x i64>">
-    %0 = spv.UConvert %arg0: vector<3xi32> to vector<3xi64>
+  // CHECK: llvm.zext %{{.*}} : !llvm<"<3 x i32>"> to !llvm<"<3 x i64>">
+  %0 = spv.UConvert %arg0: vector<3xi32> to vector<3xi64>
 
-    // CHECK: %{{.*}} = llvm.trunc %{{.*}} : !llvm<"<3 x i64>"> to !llvm<"<3 x i32>">
-    %1 = spv.UConvert %arg1: vector<3xi64> to vector<3xi32>
-	return
+  // CHECK: llvm.trunc %{{.*}} : !llvm<"<3 x i64>"> to !llvm<"<3 x i32>">
+  %1 = spv.UConvert %arg1: vector<3xi64> to vector<3xi32>
+  return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
index c4241e226a1b..bcfe234f7080 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
@@ -4,190 +4,214 @@
 // spv.IEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @i_equal_scalar
 func @i_equal_scalar(%arg0: i32, %arg1: i32) {
-	// CHECK: %{{.*}} = llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i32
-	%0 = spv.IEqual %arg0, %arg1 : i32
-	return
+  // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i32
+  %0 = spv.IEqual %arg0, %arg1 : i32
+  return
 }
 
+// CHECK-LABEL: @i_equal_vector
 func @i_equal_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
-	%0 = spv.IEqual %arg0, %arg1 : vector<4xi64>
-	return
+  // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm<"<4 x i64>">
+  %0 = spv.IEqual %arg0, %arg1 : vector<4xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.INotEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @i_not_equal_scalar
 func @i_not_equal_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.INotEqual %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.INotEqual %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @i_not_equal_vector
 func @i_not_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.INotEqual %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.INotEqual %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @s_greater_than_equal_scalar
 func @s_greater_than_equal_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.SGreaterThanEqual %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.SGreaterThanEqual %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @s_greater_than_equal_vector
 func @s_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.SGreaterThanEqual %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.SGreaterThanEqual %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SGreaterThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @s_greater_than_scalar
 func @s_greater_than_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.SGreaterThan %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.SGreaterThan %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @s_greater_than_vector
 func @s_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.SGreaterThan %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.SGreaterThan %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SLessThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @s_less_than_equal_scalar
 func @s_less_than_equal_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.SLessThanEqual %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.SLessThanEqual %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @s_less_than_equal_vector
 func @s_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.SLessThanEqual %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.SLessThanEqual %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.SLessThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @s_less_than_scalar
 func @s_less_than_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.SLessThan %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.SLessThan %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @s_less_than_vector
 func @s_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.SLessThan %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.SLessThan %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.UGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @u_greater_than_equal_scalar
 func @u_greater_than_equal_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.UGreaterThanEqual %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.UGreaterThanEqual %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @u_greater_than_equal_vector
 func @u_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.UGreaterThanEqual %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.UGreaterThanEqual %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.UGreaterThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @u_greater_than_scalar
 func @u_greater_than_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.UGreaterThan %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.UGreaterThan %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @u_greater_than_vector
 func @u_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.UGreaterThan %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.UGreaterThan %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ULessThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @u_less_than_equal_scalar
 func @u_less_than_equal_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.ULessThanEqual %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.ULessThanEqual %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @u_less_than_equal_vector
 func @u_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.ULessThanEqual %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.ULessThanEqual %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.ULessThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @u_less_than_scalar
 func @u_less_than_scalar(%arg0: i64, %arg1: i64) {
-	// CHECK: %{{.*}} = llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm.i64
-	%0 = spv.ULessThan %arg0, %arg1 : i64
-	return
+  // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm.i64
+  %0 = spv.ULessThan %arg0, %arg1 : i64
+  return
 }
 
+// CHECK-LABEL: @u_less_than_vector
 func @u_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) {
-	// CHECK: %{{.*}} = llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
-	%0 = spv.ULessThan %arg0, %arg1 : vector<2xi64>
-	return
+  // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : !llvm<"<2 x i64>">
+  %0 = spv.ULessThan %arg0, %arg1 : vector<2xi64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FOrdEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_equal_scalar
 func @f_ord_equal_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FOrdEqual %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FOrdEqual %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @f_ord_equal_vector
 func @f_ord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
-	%0 = spv.FOrdEqual %arg0, %arg1 : vector<4xf64>
-	return
+  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
+  %0 = spv.FOrdEqual %arg0, %arg1 : vector<4xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FOrdGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_greater_than_equal_scalar
 func @f_ord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_ord_greater_than_equal_vector
 func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+	// CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
 	%0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : vector<2xf64>
 	return
 }
@@ -196,158 +220,178 @@ func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>
 // spv.FOrdGreaterThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_greater_than_scalar
 func @f_ord_greater_than_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FOrdGreaterThan %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FOrdGreaterThan %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_ord_greater_than_vector
 func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FOrdGreaterThan %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FOrdGreaterThan %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FOrdLessThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_less_than_scalar
 func @f_ord_less_than_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FOrdLessThan %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FOrdLessThan %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_ord_less_than_vector
 func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FOrdLessThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_less_than_equal_scalar
 func @f_ord_less_than_equal_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FOrdLessThanEqual %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FOrdLessThanEqual %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_ord_less_than_equal_vector
 func @f_ord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FOrdLessThanEqual %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FOrdLessThanEqual %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FOrdNotEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_ord_not_equal_scalar
 func @f_ord_not_equal_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FOrdNotEqual %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FOrdNotEqual %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @f_ord_not_equal_vector
 func @f_ord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
-	%0 = spv.FOrdNotEqual %arg0, %arg1 : vector<4xf64>
-	return
+  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
+  %0 = spv.FOrdNotEqual %arg0, %arg1 : vector<4xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_equal_scalar
 func @f_unord_equal_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FUnordEqual %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FUnordEqual %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @f_unord_equal_vector
 func @f_unord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
-	%0 = spv.FUnordEqual %arg0, %arg1 : vector<4xf64>
-	return
+  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
+  %0 = spv.FUnordEqual %arg0, %arg1 : vector<4xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_greater_than_equal_scalar
 func @f_unord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_unord_greater_than_equal_vector
 func @f_unord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordGreaterThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_greater_than_scalar
 func @f_unord_greater_than_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FUnordGreaterThan %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FUnordGreaterThan %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_unord_greater_than_vector
 func @f_unord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FUnordGreaterThan %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FUnordGreaterThan %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordLessThan
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_less_than_scalar
 func @f_unord_less_than_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FUnordLessThan %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FUnordLessThan %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_unord_less_than_vector
 func @f_unord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FUnordLessThan %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FUnordLessThan %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordLessThanEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_less_than_equal_scalar
 func @f_unord_less_than_equal_scalar(%arg0: f64, %arg1: f64) {
-	// CHECK: %{{.*}} = llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm.double
-	%0 = spv.FUnordLessThanEqual %arg0, %arg1 : f64
-	return
+  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm.double
+  %0 = spv.FUnordLessThanEqual %arg0, %arg1 : f64
+  return
 }
 
+// CHECK-LABEL: @f_unord_less_than_equal_vector
 func @f_unord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
-	%0 = spv.FUnordLessThanEqual %arg0, %arg1 : vector<2xf64>
-	return
+  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm<"<2 x double>">
+  %0 = spv.FUnordLessThanEqual %arg0, %arg1 : vector<2xf64>
+  return
 }
 
 //===----------------------------------------------------------------------===//
 // spv.FUnordNotEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @f_unord_not_equal_scalar
 func @f_unord_not_equal_scalar(%arg0: f32, %arg1: f32) {
-	// CHECK: %{{.*}} = llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm.float
-	%0 = spv.FUnordNotEqual %arg0, %arg1 : f32
-	return
+  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm.float
+  %0 = spv.FUnordNotEqual %arg0, %arg1 : f32
+  return
 }
 
+// CHECK-LABEL: @f_unord_not_equal_vector
 func @f_unord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) {
-	// CHECK: %{{.*}} = llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
-	%0 = spv.FUnordNotEqual %arg0, %arg1 : vector<4xf64>
-	return
+  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm<"<4 x double>">
+  %0 = spv.FUnordNotEqual %arg0, %arg1 : vector<4xf64>
+  return
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/logical-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
similarity index 55%
rename from mlir/test/Conversion/SPIRVToLLVM/logical-to-llvm.mlir
rename to mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
index e6f2ec2433fa..a816424d5b1c 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/logical-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
@@ -4,15 +4,17 @@
 // spv.LogicalEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @logical_equal_scalar
 func @logical_equal_scalar(%arg0: i1, %arg1: i1) {
-  // CHECK: %{{.*}} = llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i1
-	%0 = spv.LogicalEqual %arg0, %arg0 : i1
+  // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm.i1
+  %0 = spv.LogicalEqual %arg0, %arg0 : i1
   return
 }
 
+// CHECK-LABEL: @logical_equal_vector
 func @logical_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
-  // CHECK: %{{.*}} = llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
-	%0 = spv.LogicalEqual %arg0, %arg0 : vector<4xi1>
+  // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
+  %0 = spv.LogicalEqual %arg0, %arg0 : vector<4xi1>
   return
 }
 
@@ -20,15 +22,17 @@ func @logical_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
 // spv.LogicalNotEqual
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @logical_not_equal_scalar
 func @logical_not_equal_scalar(%arg0: i1, %arg1: i1) {
-  // CHECK: %{{.*}} = llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i1
-	%0 = spv.LogicalNotEqual %arg0, %arg0 : i1
+  // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm.i1
+  %0 = spv.LogicalNotEqual %arg0, %arg0 : i1
   return
 }
 
+// CHECK-LABEL: @logical_not_equal_vector
 func @logical_not_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
-  // CHECK: %{{.*}} = llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
-	%0 = spv.LogicalNotEqual %arg0, %arg0 : vector<4xi1>
+  // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
+  %0 = spv.LogicalNotEqual %arg0, %arg0 : vector<4xi1>
   return
 }
 
@@ -36,17 +40,19 @@ func @logical_not_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
 // spv.LogicalNot
 //===----------------------------------------------------------------------===//
 
-func @logical_not__scalar(%arg0: i1) {
+// CHECK-LABEL: @logical_not_scalar
+func @logical_not_scalar(%arg0: i1) {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : !llvm.i1
-  // CHECK: %{{.*}} = llvm.xor %{{.*}}, %[[CONST]] : !llvm.i1
-	%0 = spv.LogicalNot %arg0 : i1
+  // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm.i1
+  %0 = spv.LogicalNot %arg0 : i1
   return
 }
 
+// CHECK-LABEL: @logical_not_vector
 func @logical_not_vector(%arg0: vector<4xi1>) {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(dense<true> : vector<4xi1>) : !llvm<"<4 x i1>">
-  // CHECK: %{{.*}} = llvm.xor %{{.*}}, %[[CONST]] : !llvm<"<4 x i1>">
-	%0 = spv.LogicalNot %arg0 : vector<4xi1>
+  // CHECK: llvm.xor %{{.*}}, %[[CONST]] : !llvm<"<4 x i1>">
+  %0 = spv.LogicalNot %arg0 : vector<4xi1>
   return
 }
 
@@ -54,15 +60,17 @@ func @logical_not_vector(%arg0: vector<4xi1>) {
 // spv.LogicalAnd
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @logical_and_scalar
 func @logical_and_scalar(%arg0: i1, %arg1: i1) {
-  // CHECK: %{{.*}} = llvm.and %{{.*}}, %{{.*}} : !llvm.i1
-	%0 = spv.LogicalAnd %arg0, %arg0 : i1
+  // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm.i1
+  %0 = spv.LogicalAnd %arg0, %arg0 : i1
   return
 }
 
+// CHECK-LABEL: @logical_and_vector
 func @logical_and_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
-  // CHECK: %{{.*}} = llvm.and %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
-	%0 = spv.LogicalAnd %arg0, %arg0 : vector<4xi1>
+  // CHECK: llvm.and %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
+  %0 = spv.LogicalAnd %arg0, %arg0 : vector<4xi1>
   return
 }
 
@@ -70,14 +78,16 @@ func @logical_and_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
 // spv.LogicalOr
 //===----------------------------------------------------------------------===//
 
+// CHECK-LABEL: @logical_or_scalar
 func @logical_or_scalar(%arg0: i1, %arg1: i1) {
-  // CHECK: %{{.*}} = llvm.or %{{.*}}, %{{.*}} : !llvm.i1
-	%0 = spv.LogicalOr %arg0, %arg0 : i1
+  // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm.i1
+  %0 = spv.LogicalOr %arg0, %arg0 : i1
   return
 }
 
+// CHECK-LABEL: @logical_or_vector
 func @logical_or_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) {
-  // CHECK: %{{.*}} = llvm.or %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
-	%0 = spv.LogicalOr %arg0, %arg0 : vector<4xi1>
+  // CHECK: llvm.or %{{.*}}, %{{.*}} : !llvm<"<4 x i1>">
+  %0 = spv.LogicalOr %arg0, %arg0 : vector<4xi1>
   return
 }


        


More information about the Mlir-commits mailing list